Dataset schema (one record per compiled module, fields in this order):

    field                  type       range
    -----                  ----       -----
    entry_point            string     length 1 - 65
    original_triton_code   string     length 4.5k - 619k
    python_code            string     length 208 - 60.9k
    triton_code            string     length 1.15k - 275k
    repo_name              string     length 7 - 115
    module_name            string     length 1 - 65
    synthetic              bool       1 class
    uuid                   int64      0 - 18.5k
    licenses               sequence   length 1 - 6
    stars                  int64      0 - 19.8k
    sha                    string     length 40 - 40
    repo_link              string     length 72 - 180
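Each row below follows this schema in field order. As a minimal, hypothetical usage sketch (the dataset id "user/inductor-triton-pairs" is a placeholder, not the real identifier), a record could be loaded and inspected with the Hugging Face datasets library:

    # Hypothetical sketch: the dataset id below is a placeholder.
    from datasets import load_dataset

    ds = load_dataset("user/inductor-triton-pairs", split="train")
    record = ds[0]

    # Each record pairs an eager PyTorch module (python_code) with its
    # Inductor-generated Triton lowering (original_triton_code) and a
    # cleaned standalone version (triton_code), plus provenance metadata.
    print(record["entry_point"], record["repo_name"], record["sha"])
    print(len(record["original_triton_code"]), len(record["triton_code"]))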
entry_point: AnchorBoxTransform
original_triton_code:

# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_2/inductor_cache/q5/cq5uvfmzbevghl56cmh2evudsln2sycnseu4ion3zjbukykowqvt.py
# Topologically Sorted Source Nodes: [pred_boxes], Original ATen: [aten.stack]
# Source node to ATen node mapping:
#   pred_boxes => cat
# Graph fragment:
#   %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze, %unsqueeze_1, %unsqueeze_2, %unsqueeze_3], -1), kwargs = {})
triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = (xindex // 4) % 4
    x2 = (xindex // 16)
    x3 = xindex
    tmp0 = x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x1 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tl.load(in_ptr0 + (8 + x1 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp7 = tmp6 - tmp5
    tmp8 = 0.5
    tmp9 = tmp7 * tmp8
    tmp10 = tmp5 + tmp9
    tmp11 = tl.load(in_ptr1 + (x1 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp12 = tmp11 * tmp7
    tmp13 = tmp10 + tmp12
    tmp14 = tl.load(in_ptr1 + (8 + x1 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp15 = tmp14 * tmp7
    tmp16 = tmp15 * tmp8
    tmp17 = tmp13 - tmp16
    tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
    tmp19 = tl.where(tmp4, tmp17, tmp18)
    tmp20 = tmp0 >= tmp3
    tmp21 = tl.full([1], 2, tl.int64)
    tmp22 = tmp0 < tmp21
    tmp23 = tmp20 & tmp22
    tmp24 = tl.load(in_ptr0 + (4 + x1 + (16*x2)), tmp23 & xmask, eviction_policy='evict_last', other=0.0)
    tmp25 = tl.load(in_ptr0 + (12 + x1 + (16*x2)), tmp23 & xmask, eviction_policy='evict_last', other=0.0)
    tmp26 = tmp25 - tmp24
    tmp27 = tmp26 * tmp8
    tmp28 = tmp24 + tmp27
    tmp29 = tl.load(in_ptr1 + (4 + x1 + (16*x2)), tmp23 & xmask, eviction_policy='evict_last', other=0.0)
    tmp30 = tmp29 * tmp26
    tmp31 = tmp28 + tmp30
    tmp32 = tl.load(in_ptr1 + (12 + x1 + (16*x2)), tmp23 & xmask, eviction_policy='evict_last', other=0.0)
    tmp33 = tmp32 * tmp26
    tmp34 = tmp33 * tmp8
    tmp35 = tmp31 - tmp34
    tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
    tmp37 = tl.where(tmp23, tmp35, tmp36)
    tmp38 = tmp0 >= tmp21
    tmp39 = tl.full([1], 3, tl.int64)
    tmp40 = tmp0 < tmp39
    tmp41 = tmp38 & tmp40
    tmp42 = tl.load(in_ptr0 + (x1 + (16*x2)), tmp41 & xmask, eviction_policy='evict_last', other=0.0)
    tmp43 = tl.load(in_ptr0 + (8 + x1 + (16*x2)), tmp41 & xmask, eviction_policy='evict_last', other=0.0)
    tmp44 = tmp43 - tmp42
    tmp45 = tmp44 * tmp8
    tmp46 = tmp42 + tmp45
    tmp47 = tl.load(in_ptr1 + (x1 + (16*x2)), tmp41 & xmask, eviction_policy='evict_last', other=0.0)
    tmp48 = tmp47 * tmp44
    tmp49 = tmp46 + tmp48
    tmp50 = tl.load(in_ptr1 + (8 + x1 + (16*x2)), tmp41 & xmask, eviction_policy='evict_last', other=0.0)
    tmp51 = tmp50 * tmp44
    tmp52 = tmp51 * tmp8
    tmp53 = tmp49 + tmp52
    tmp54 = tl.full(tmp53.shape, 0.0, tmp53.dtype)
    tmp55 = tl.where(tmp41, tmp53, tmp54)
    tmp56 = tmp0 >= tmp39
    tmp57 = tl.full([1], 4, tl.int64)
    tmp58 = tmp0 < tmp57
    tmp59 = tl.load(in_ptr0 + (4 + x1 + (16*x2)), tmp56 & xmask, eviction_policy='evict_last', other=0.0)
    tmp60 = tl.load(in_ptr0 + (12 + x1 + (16*x2)), tmp56 & xmask, eviction_policy='evict_last', other=0.0)
    tmp61 = tmp60 - tmp59
    tmp62 = tmp61 * tmp8
    tmp63 = tmp59 + tmp62
    tmp64 = tl.load(in_ptr1 + (4 + x1 + (16*x2)), tmp56 & xmask, eviction_policy='evict_last', other=0.0)
    tmp65 = tmp64 * tmp61
    tmp66 = tmp63 + tmp65
    tmp67 = tl.load(in_ptr1 + (12 + x1 + (16*x2)), tmp56 & xmask, eviction_policy='evict_last', other=0.0)
    tmp68 = tmp67 * tmp61
    tmp69 = tmp68 * tmp8
    tmp70 = tmp66 + tmp69
    tmp71 = tl.full(tmp70.shape, 0.0, tmp70.dtype)
    tmp72 = tl.where(tmp56, tmp70, tmp71)
    tmp73 = tl.where(tmp41, tmp55, tmp72)
    tmp74 = tl.where(tmp23, tmp37, tmp73)
    tmp75 = tl.where(tmp4, tmp19, tmp74)
    tl.store(out_ptr0 + (x3), tmp75, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [pred_boxes], Original ATen: [aten.stack]
        stream0 = get_raw_stream(0)
        triton_poi_fused_stack_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf0, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
python_code:

import torch
from torch import Tensor
from typing import Optional
import torch.nn as nn


class AnchorBoxTransform(nn.Module):

    def __init__(self, mean: 'Optional[Tensor]'=None, std:
        'Optional[Tensor]'=None, log_length: 'bool'=False):
        super(AnchorBoxTransform, self).__init__()
        self.mean = mean
        self.std = std
        self.log_length = log_length

    def forward(self, boxes: 'Tensor', deltas: 'Tensor') -> Tensor:
        widths = boxes[:, :, 2] - boxes[:, :, 0]
        heights = boxes[:, :, 3] - boxes[:, :, 1]
        center_x = boxes[:, :, 0] + 0.5 * widths
        center_y = boxes[:, :, 1] + 0.5 * heights
        if self.std is not None:
            deltas = deltas.mul(self.std)
        if self.mean is not None:
            deltas = deltas.add(self.mean)
        dx, dy, dw, dh = [deltas[:, :, i] for i in range(4)]
        if self.log_length:
            dw, dh = [torch.exp(x) for x in (dw, dh)]
        pred_center_x = center_x + dx * widths
        pred_center_y = center_y + dy * heights
        pred_w = dw * widths
        pred_h = dh * heights
        pred_boxes_x1 = pred_center_x - 0.5 * pred_w
        pred_boxes_y1 = pred_center_y - 0.5 * pred_h
        pred_boxes_x2 = pred_center_x + 0.5 * pred_w
        pred_boxes_y2 = pred_center_y + 0.5 * pred_h
        pred_boxes = torch.stack([pred_boxes_x1, pred_boxes_y1,
            pred_boxes_x2, pred_boxes_y2], dim=-1)
        return pred_boxes


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import Tensor
from typing import Optional
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_stack_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x2 = xindex // 16
    x3 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x1 + 16 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tl.load(in_ptr0 + (8 + x1 + 16 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp7 = tmp6 - tmp5
    tmp8 = 0.5
    tmp9 = tmp7 * tmp8
    tmp10 = tmp5 + tmp9
    tmp11 = tl.load(in_ptr1 + (x1 + 16 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp12 = tmp11 * tmp7
    tmp13 = tmp10 + tmp12
    tmp14 = tl.load(in_ptr1 + (8 + x1 + 16 * x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp15 = tmp14 * tmp7
    tmp16 = tmp15 * tmp8
    tmp17 = tmp13 - tmp16
    tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
    tmp19 = tl.where(tmp4, tmp17, tmp18)
    tmp20 = tmp0 >= tmp3
    tmp21 = tl.full([1], 2, tl.int64)
    tmp22 = tmp0 < tmp21
    tmp23 = tmp20 & tmp22
    tmp24 = tl.load(in_ptr0 + (4 + x1 + 16 * x2), tmp23 & xmask, eviction_policy='evict_last', other=0.0)
    tmp25 = tl.load(in_ptr0 + (12 + x1 + 16 * x2), tmp23 & xmask, eviction_policy='evict_last', other=0.0)
    tmp26 = tmp25 - tmp24
    tmp27 = tmp26 * tmp8
    tmp28 = tmp24 + tmp27
    tmp29 = tl.load(in_ptr1 + (4 + x1 + 16 * x2), tmp23 & xmask, eviction_policy='evict_last', other=0.0)
    tmp30 = tmp29 * tmp26
    tmp31 = tmp28 + tmp30
    tmp32 = tl.load(in_ptr1 + (12 + x1 + 16 * x2), tmp23 & xmask, eviction_policy='evict_last', other=0.0)
    tmp33 = tmp32 * tmp26
    tmp34 = tmp33 * tmp8
    tmp35 = tmp31 - tmp34
    tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
    tmp37 = tl.where(tmp23, tmp35, tmp36)
    tmp38 = tmp0 >= tmp21
    tmp39 = tl.full([1], 3, tl.int64)
    tmp40 = tmp0 < tmp39
    tmp41 = tmp38 & tmp40
    tmp42 = tl.load(in_ptr0 + (x1 + 16 * x2), tmp41 & xmask, eviction_policy='evict_last', other=0.0)
    tmp43 = tl.load(in_ptr0 + (8 + x1 + 16 * x2), tmp41 & xmask, eviction_policy='evict_last', other=0.0)
    tmp44 = tmp43 - tmp42
    tmp45 = tmp44 * tmp8
    tmp46 = tmp42 + tmp45
    tmp47 = tl.load(in_ptr1 + (x1 + 16 * x2), tmp41 & xmask, eviction_policy='evict_last', other=0.0)
    tmp48 = tmp47 * tmp44
    tmp49 = tmp46 + tmp48
    tmp50 = tl.load(in_ptr1 + (8 + x1 + 16 * x2), tmp41 & xmask, eviction_policy='evict_last', other=0.0)
    tmp51 = tmp50 * tmp44
    tmp52 = tmp51 * tmp8
    tmp53 = tmp49 + tmp52
    tmp54 = tl.full(tmp53.shape, 0.0, tmp53.dtype)
    tmp55 = tl.where(tmp41, tmp53, tmp54)
    tmp56 = tmp0 >= tmp39
    tl.full([1], 4, tl.int64)
    tmp59 = tl.load(in_ptr0 + (4 + x1 + 16 * x2), tmp56 & xmask, eviction_policy='evict_last', other=0.0)
    tmp60 = tl.load(in_ptr0 + (12 + x1 + 16 * x2), tmp56 & xmask, eviction_policy='evict_last', other=0.0)
    tmp61 = tmp60 - tmp59
    tmp62 = tmp61 * tmp8
    tmp63 = tmp59 + tmp62
    tmp64 = tl.load(in_ptr1 + (4 + x1 + 16 * x2), tmp56 & xmask, eviction_policy='evict_last', other=0.0)
    tmp65 = tmp64 * tmp61
    tmp66 = tmp63 + tmp65
    tmp67 = tl.load(in_ptr1 + (12 + x1 + 16 * x2), tmp56 & xmask, eviction_policy='evict_last', other=0.0)
    tmp68 = tmp67 * tmp61
    tmp69 = tmp68 * tmp8
    tmp70 = tmp66 + tmp69
    tmp71 = tl.full(tmp70.shape, 0.0, tmp70.dtype)
    tmp72 = tl.where(tmp56, tmp70, tmp71)
    tmp73 = tl.where(tmp41, tmp55, tmp72)
    tmp74 = tl.where(tmp23, tmp37, tmp73)
    tmp75 = tl.where(tmp4, tmp19, tmp74)
    tl.store(out_ptr0 + x3, tmp75, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_stack_0[grid(256)](arg0_1, arg1_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class AnchorBoxTransformNew(nn.Module):

    def __init__(self, mean: 'Optional[Tensor]'=None, std:
        'Optional[Tensor]'=None, log_length: 'bool'=False):
        super(AnchorBoxTransformNew, self).__init__()
        self.mean = mean
        self.std = std
        self.log_length = log_length

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
repo_name: TidalPaladin/combustion
module_name: AnchorBoxTransform
synthetic: false
uuid: 17985
licenses: ["Apache-2.0"]
stars: 3
sha: 69b9a2b9baf90b81ed9098b4f0391f5c15efaee7
repo_link: https://github.com/TidalPaladin/combustion/tree/69b9a2b9baf90b81ed9098b4f0391f5c15efaee7
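A record can be sanity-checked by executing its python_code and triton_code fields in one process and comparing the eager module against the generated wrapper on identical CUDA inputs. A minimal sketch, assuming AnchorBoxTransform, AnchorBoxTransformNew, and get_inputs from this record are in scope and a CUDA device is available:

    import torch

    # Both modules are stateless here (mean/std default to None), so no
    # weight copying is needed before comparing outputs.
    boxes, deltas = [t.cuda() for t in get_inputs()]
    eager = AnchorBoxTransform().cuda()
    compiled = AnchorBoxTransformNew().cuda()

    # The fused stack kernel should reproduce the eager box decoding.
    torch.testing.assert_close(compiled(boxes, deltas), eager(boxes, deltas))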
entry_point: TransposedConvModel
original_triton_code:

# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_2/inductor_cache/5z/c5zg352o7ezu3thtl2wktzw27dfbjh5zd5ctbdw6zoqeyriczm22.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   x => convolution
#   x_1 => relu
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[2048],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1440
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 36) % 10
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/3r/c3r6yugtbsco6e3d3egpj3v5muiupbrxn7l4tpdsbktimuzlfcxj.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
#   x_2 => convolution_1
# Graph fragment:
#   %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4096],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 2560
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 64) % 10
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (10, 10, 3, 3), (90, 9, 3, 1))
    assert_size_stride(primals_2, (10, ), (1, ))
    assert_size_stride(primals_3, (4, 10, 4, 4), (160, 16, 4, 1))
    assert_size_stride(primals_4, (10, 10, 3, 3), (90, 9, 3, 1))
    assert_size_stride(primals_5, (10, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 10, 6, 6), (360, 36, 6, 1))
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
        stream0 = get_raw_stream(0)
        triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 1440, grid=grid(1440), stream=stream0)
        del primals_2
        # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 10, 8, 8), (640, 64, 8, 1))
        buf3 = buf2; del buf2  # reuse
        # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
        triton_poi_fused_convolution_1.run(buf3, primals_5, 2560, grid=grid(2560), stream=stream0)
        del primals_5
    return (buf3, primals_1, primals_3, primals_4, buf1, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((10, 10, 3, 3), (90, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 10, 4, 4), (160, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((10, 10, 3, 3), (90, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
python_code:

import torch
import torch.nn
import torch.utils.data
import torch.utils.tensorboard._pytorch_graph
import torch.onnx.symbolic_caffe2


class TransposedConvModel(torch.nn.Module):

    def __init__(self):
        super(TransposedConvModel, self).__init__()
        self.conv1 = torch.nn.ConvTranspose2d(10, 10, 3)
        self.relu1 = torch.nn.ReLU()
        self.conv2 = torch.nn.ConvTranspose2d(10, 10, 3)

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu1(x)
        x = self.conv2(x)
        return x


def get_inputs():
    return [torch.rand([4, 10, 4, 4])]


def get_init_inputs():
    return [[], {}]
triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn
import torch.utils.data
import torch.utils.tensorboard._pytorch_graph
import torch.onnx.symbolic_caffe2

assert_size_stride = torch._C._dynamo.guards.assert_size_stride


@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1440
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 36 % 10
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 2560
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 64 % 10
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (10, 10, 3, 3), (90, 9, 3, 1))
    assert_size_stride(primals_2, (10,), (1,))
    assert_size_stride(primals_3, (4, 10, 4, 4), (160, 16, 4, 1))
    assert_size_stride(primals_4, (10, 10, 3, 3), (90, 9, 3, 1))
    assert_size_stride(primals_5, (10,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 10, 6, 6), (360, 36, 6, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(1440)](buf1, primals_2,
            1440, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=True,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 10, 8, 8), (640, 64, 8, 1))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_1[grid(2560)](buf3, primals_5, 2560,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
    return buf3, primals_1, primals_3, primals_4, buf1


class TransposedConvModelNew(torch.nn.Module):

    def __init__(self):
        super(TransposedConvModelNew, self).__init__()
        self.conv1 = torch.nn.ConvTranspose2d(10, 10, 3)
        self.relu1 = torch.nn.ReLU()
        self.conv2 = torch.nn.ConvTranspose2d(10, 10, 3)

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
repo_name: Rohan-Chaudhury/aimet
module_name: TransposedConvModel
synthetic: false
uuid: 17986
licenses: ["BSD-3-Clause"]
stars: 3
sha: 1c38cac8cc0fd32dca40ce5e39940805d29f7a4a
repo_link: https://github.com/Rohan-Chaudhury/aimet/tree/1c38cac8cc0fd32dca40ce5e39940805d29f7a4a
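For parameterized modules like this one, the eager and compiled copies are initialized independently, so their weights must be synchronized before any comparison. A minimal sketch, assuming TransposedConvModel and TransposedConvModelNew from this record are in scope and a CUDA device is available:

    import torch

    x = torch.rand(4, 10, 4, 4, device="cuda")
    eager = TransposedConvModel().cuda()
    compiled = TransposedConvModelNew().cuda()

    # Both classes expose identical parameter names (conv1.*, conv2.*),
    # so the eager weights can be copied straight into the compiled copy.
    compiled.load_state_dict(eager.state_dict())

    torch.testing.assert_close(compiled(x), eager(x))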
entry_point: Downsampling
original_triton_code:

# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_2/inductor_cache/wi/cwiqbpoqi6m2vpqbo7yhlu6k5kw2cosffol744o35qhjqcmj57rw.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
#   x_1 => convolution_1
#   x_2 => relu
# Graph fragment:
#   %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
    tmp1 = tl.load(in_ptr0 + (0))
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)
    tl.store(in_out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/hh/chh2vels25xtylq5bzfebl6eoopomnq7wro6ud6rt4mdtjm7q6ah.py
# Topologically Sorted Source Nodes: [x_4, x_6, x_7, relu_1], Original ATen: [aten.convolution, aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   relu_1 => relu_1
#   x_4 => convolution_3
#   x_6 => convolution_5
#   x_7 => add
# Graph fragment:
#   %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_2, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
#   %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_4, %primals_9, %primals_10, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_3, %convolution_5), kwargs = {})
#   %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
#   %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_add_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_convolution_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 4) % 4
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (x3), xmask)
    tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp7 = tl.full([1], 0, tl.int32)
    tmp8 = triton_helpers.maximum(tmp7, tmp6)
    tmp9 = 0.0
    tmp10 = tmp8 <= tmp9
    tl.store(in_out_ptr0 + (x3), tmp8, xmask)
    tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 1, 5, 5), (25, 25, 5, 1))
    assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_4, (1, ), (1, ))
    assert_size_stride(primals_5, (1, 1, 5, 5), (25, 25, 5, 1))
    assert_size_stride(primals_6, (4, 1, 1, 1), (1, 1, 1, 1))
    assert_size_stride(primals_7, (4, ), (1, ))
    assert_size_stride(primals_8, (4, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_9, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_10, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
        # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
        buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 1, 2, 2), (4, 4, 2, 1))
        buf2 = buf1; del buf1  # reuse
        # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
        stream0 = get_raw_stream(0)
        triton_poi_fused_convolution_relu_0.run(buf2, primals_4, 16, grid=grid(16), stream=stream0)
        del primals_4
        # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
        buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 1, 2, 2), (4, 4, 2, 1))
        # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
        buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 4, 2, 2), (16, 4, 2, 1))
        # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution]
        buf5 = extern_kernels.convolution(primals_1, primals_8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf5, (4, 4, 2, 2), (16, 4, 2, 1))
        # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
        buf6 = extern_kernels.convolution(buf5, primals_9, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 4, 2, 2), (16, 4, 2, 1))
        buf7 = buf4; del buf4  # reuse
        buf8 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool)
        # Topologically Sorted Source Nodes: [x_4, x_6, x_7, relu_1], Original ATen: [aten.convolution, aten.add, aten.relu, aten.threshold_backward]
        triton_poi_fused_add_convolution_relu_threshold_backward_1.run(buf7, primals_7, buf6, primals_10, buf8, 64, grid=grid(64), stream=stream0)
        del buf6
        del primals_10
        del primals_7
    return (buf7, primals_1, primals_2, primals_3, primals_5, primals_6, primals_8, primals_9, buf0, buf2, buf3, buf5, buf8, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((1, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_8 = rand_strided((4, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_9 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
python_code:

import torch
import torch.nn as M


def DepthwiseConv(in_channels, kernel_size, stride, padding):
    return M.Conv2d(in_channels=in_channels, out_channels=in_channels,
        kernel_size=kernel_size, stride=stride, padding=padding,
        groups=in_channels, bias=False)


def PointwiseConv(in_channels, out_channels):
    return M.Conv2d(in_channels=in_channels, out_channels=out_channels,
        kernel_size=1, padding=0, bias=True)


class CovSepBlock(M.Module):

    def __init__(self, in_channels, out_channels, kernel_size=5, stride=1,
        padding=2):
        super().__init__()
        self.dc = DepthwiseConv(in_channels, kernel_size, stride=stride,
            padding=padding)
        self.pc = PointwiseConv(in_channels, out_channels)

    def forward(self, x):
        x = self.dc(x)
        x = self.pc(x)
        return x


class Downsampling(M.Module):

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.sepconv = CovSepBlock(in_channels=in_channels,
            out_channels=out_channels // 4, stride=2, padding=2)
        self.activate = M.ReLU(inplace=True)
        self.sepconv2 = CovSepBlock(in_channels=out_channels // 4,
            out_channels=out_channels, padding=2)
        self.branchconv = CovSepBlock(in_channels, out_channels,
            kernel_size=3, stride=2, padding=1)
        self.activate2 = M.ReLU(inplace=True)

    def forward(self, x):
        branch = x
        x = self.sepconv(x)
        x = self.activate(x)
        x = self.sepconv2(x)
        branch = self.branchconv(branch)
        x += branch
        return self.activate2(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as M

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)
    tl.store(in_out_ptr0 + x0, tmp5, xmask)


@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_out_ptr0,
    in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x3, xmask)
    tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp7 = tl.full([1], 0, tl.int32)
    tmp8 = triton_helpers.maximum(tmp7, tmp6)
    tmp9 = 0.0
    tmp10 = tmp8 <= tmp9
    tl.store(in_out_ptr0 + x3, tmp8, xmask)
    tl.store(out_ptr0 + x3, tmp10, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 1, 5, 5), (25, 25, 5, 1))
    assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_4, (1,), (1,))
    assert_size_stride(primals_5, (1, 1, 5, 5), (25, 25, 5, 1))
    assert_size_stride(primals_6, (4, 1, 1, 1), (1, 1, 1, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 1, 3, 3), (9, 9, 3, 1))
    assert_size_stride(primals_9, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_10, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2,
            2), padding=(2, 2), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
        buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 1, 2, 2), (4, 4, 2, 1))
        buf2 = buf1
        del buf1
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(16)](buf2, primals_4, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_4
        buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1),
            padding=(2, 2), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 1, 2, 2), (4, 4, 2, 1))
        buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 4, 2, 2), (16, 4, 2, 1))
        buf5 = extern_kernels.convolution(primals_1, primals_8, stride=(2,
            2), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=4, bias=None)
        assert_size_stride(buf5, (4, 4, 2, 2), (16, 4, 2, 1))
        buf6 = extern_kernels.convolution(buf5, primals_9, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 4, 2, 2), (16, 4, 2, 1))
        buf7 = buf4
        del buf4
        buf8 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool)
        triton_poi_fused_add_convolution_relu_threshold_backward_1[grid(64)](
            buf7, primals_7, buf6, primals_10, buf8, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf6
        del primals_10
        del primals_7
    return (buf7, primals_1, primals_2, primals_3, primals_5, primals_6,
        primals_8, primals_9, buf0, buf2, buf3, buf5, buf8)


def DepthwiseConv(in_channels, kernel_size, stride, padding):
    return M.Conv2d(in_channels=in_channels, out_channels=in_channels,
        kernel_size=kernel_size, stride=stride, padding=padding,
        groups=in_channels, bias=False)


def PointwiseConv(in_channels, out_channels):
    return M.Conv2d(in_channels=in_channels, out_channels=out_channels,
        kernel_size=1, padding=0, bias=True)


class CovSepBlock(M.Module):

    def __init__(self, in_channels, out_channels, kernel_size=5, stride=1,
        padding=2):
        super().__init__()
        self.dc = DepthwiseConv(in_channels, kernel_size, stride=stride,
            padding=padding)
        self.pc = PointwiseConv(in_channels, out_channels)

    def forward(self, x):
        x = self.dc(x)
        x = self.pc(x)
        return x


class DownsamplingNew(M.Module):

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.sepconv = CovSepBlock(in_channels=in_channels,
            out_channels=out_channels // 4, stride=2, padding=2)
        self.activate = M.ReLU(inplace=True)
        self.sepconv2 = CovSepBlock(in_channels=out_channels // 4,
            out_channels=out_channels, padding=2)
        self.branchconv = CovSepBlock(in_channels, out_channels,
            kernel_size=3, stride=2, padding=1)
        self.activate2 = M.ReLU(inplace=True)

    def forward(self, input_0):
        primals_2 = self.sepconv.dc.weight
        primals_3 = self.sepconv.pc.weight
        primals_4 = self.sepconv.pc.bias
        primals_5 = self.sepconv2.dc.weight
        primals_6 = self.sepconv2.pc.weight
        primals_7 = self.sepconv2.pc.bias
        primals_8 = self.branchconv.dc.weight
        primals_9 = self.branchconv.pc.weight
        primals_10 = self.branchconv.pc.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10])
        return output[0]
repo_name: SuperbTUM/RAW-image-denoising
module_name: Downsampling
synthetic: false
uuid: 17987
licenses: ["MIT"]
stars: 4
sha: 9f81be8da6a576f641022707d98b8c37f5c599ab
repo_link: https://github.com/SuperbTUM/RAW-image-denoising/tree/9f81be8da6a576f641022707d98b8c37f5c599ab
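The same weight-sharing check applies here, with the constructor arguments taken from get_init_inputs. A minimal sketch, assuming Downsampling and DownsamplingNew from this record are in scope and a CUDA device is available:

    import torch

    x = torch.rand(4, 4, 4, 4, device="cuda")
    eager = Downsampling(in_channels=4, out_channels=4).cuda()
    compiled = DownsamplingNew(in_channels=4, out_channels=4).cuda()
    compiled.load_state_dict(eager.state_dict())

    # buf8 (the boolean threshold_backward mask) stays internal to call();
    # only the forward output is compared here.
    torch.testing.assert_close(compiled(x), eager(x))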
entry_point: SoftCrossEntropyLoss
original_triton_code:

# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_2/inductor_cache/u2/cu2beycg2t2ghizs6f4qom7bxbxmajhdaakuyq6y2korxywhp6ba.py
# Topologically Sorted Source Nodes: [log_probs], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
#   log_probs => amax, sub
# Graph fragment:
#   %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {})
#   %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = (xindex // 64)
    tmp0 = tl.load(in_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/a2/ca2dtp2hwhhlrcxa5accwunmm7doqykmq57slzkicy56ixs7dvsf.py
# Topologically Sorted Source Nodes: [neg, log_probs, mul, loss, loss_1], Original ATen: [aten.neg, aten._log_softmax, aten.mul, aten.sum, aten.mean]
# Source node to ATen node mapping:
#   log_probs => exp, log, sub_1, sum_1
#   loss => sum_2
#   loss_1 => mean
#   mul => mul
#   neg => neg
# Graph fragment:
#   %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {})
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
#   %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %sub_1), kwargs = {})
#   %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_2,), kwargs = {})
triton_per_fused__log_softmax_mean_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_mean_mul_neg_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 64],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_mean_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 64
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 16
    r1 = (rindex // 16)
    r2 = rindex
    tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
    tmp2 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
    tmp4 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
    tmp7 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
    tmp10 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
    tmp16 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
    tmp21 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
    tmp26 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
    tmp1 = -tmp0
    tmp3 = tl_math.exp(tmp2)
    tmp5 = tl_math.exp(tmp4)
    tmp6 = tmp3 + tmp5
    tmp8 = tl_math.exp(tmp7)
    tmp9 = tmp6 + tmp8
    tmp11 = tl_math.exp(tmp10)
    tmp12 = tmp9 + tmp11
    tmp13 = tl_math.log(tmp12)
    tmp14 = tmp2 - tmp13
    tmp15 = tmp1 * tmp14
    tmp17 = -tmp16
    tmp18 = tmp4 - tmp13
    tmp19 = tmp17 * tmp18
    tmp20 = tmp15 + tmp19
    tmp22 = -tmp21
    tmp23 = tmp7 - tmp13
    tmp24 = tmp22 * tmp23
    tmp25 = tmp20 + tmp24
    tmp27 = -tmp26
    tmp28 = tmp10 - tmp13
    tmp29 = tmp27 * tmp28
    tmp30 = tmp25 + tmp29
    tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK])
    tmp33 = tl.sum(tmp31, 1)[:, None]
    tmp34 = 64.0
    tmp35 = tmp33 / tmp34
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp35, None)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [log_probs], Original ATen: [aten._log_softmax]
        stream0 = get_raw_stream(0)
        triton_poi_fused__log_softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
        buf2 = empty_strided_cuda((), (), torch.float32)
        buf3 = buf2; del buf2  # reuse
        # Topologically Sorted Source Nodes: [neg, log_probs, mul, loss, loss_1], Original ATen: [aten.neg, aten._log_softmax, aten.mul, aten.sum, aten.mean]
        triton_per_fused__log_softmax_mean_mul_neg_sum_1.run(buf3, arg1_1, buf0, 1, 64, grid=grid(1), stream=stream0)
        del arg1_1
        del buf0
    return (buf3, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
python_code:

import torch
import torch.nn as nn


class SoftCrossEntropyLoss(nn.Module):
    """Cross entropy loss with soft label as target
    """

    def __init__(self, num_classes, epsilon=0.1, use_gpu=True,
        label_smooth=False, batch_average=True):
        super(SoftCrossEntropyLoss, self).__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.use_gpu = use_gpu
        self.logsoftmax = nn.LogSoftmax(dim=1)
        self.label_smooth = label_smooth
        self.batch_average = batch_average

    def forward(self, inputs, targets):
        """
        Args:
            inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)
            targets: ground truth labels with shape (batch_size, num_classes)
        """
        log_probs = self.logsoftmax(inputs)
        if self.use_gpu:
            targets = targets
        if self.label_smooth:
            targets = (1 - self.epsilon
                ) * targets + self.epsilon / self.num_classes
        loss = (-targets * log_probs).sum(1)
        if self.batch_average:
            loss = loss.mean()
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_classes': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_mean_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp2 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp4 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp7 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp10 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp16 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp21 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp26 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp1 = -tmp0 tmp3 = tl_math.exp(tmp2) tmp5 = tl_math.exp(tmp4) tmp6 = tmp3 + tmp5 tmp8 = tl_math.exp(tmp7) tmp9 = tmp6 + tmp8 tmp11 = tl_math.exp(tmp10) tmp12 = tmp9 + tmp11 tmp13 = tl_math.log(tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp1 * tmp14 tmp17 = -tmp16 tmp18 = tmp4 - tmp13 tmp19 = tmp17 * tmp18 tmp20 = tmp15 + tmp19 tmp22 = -tmp21 tmp23 = tmp7 - tmp13 tmp24 = tmp22 * tmp23 tmp25 = tmp20 + tmp24 tmp27 = -tmp26 tmp28 = tmp10 - tmp13 tmp29 = tmp27 * tmp28 tmp30 = tmp25 + tmp29 tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp33 = tl.sum(tmp31, 1)[:, None] tmp34 = 64.0 tmp35 = tmp33 / tmp34 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp35, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused__log_softmax_mean_mul_neg_sum_1[grid(1)](buf3, arg1_1, buf0, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 del buf0 return buf3, class SoftCrossEntropyLossNew(nn.Module): """Cross entropy loss with soft label as target """ def __init__(self, num_classes, epsilon=0.1, 
                 use_gpu=True, label_smooth=False, batch_average=True):
        super(SoftCrossEntropyLossNew, self).__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.use_gpu = use_gpu
        self.logsoftmax = nn.LogSoftmax(dim=1)
        self.label_smooth = label_smooth
        self.batch_average = batch_average

    def forward(self, input_0, input_1):
        # The fused graph was traced with label_smooth=False and
        # batch_average=True; the constructor flags are kept for API parity.
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
Terminator8758/Precise-ICS-master
SoftCrossEntropyLoss
false
17,988
[ "MIT" ]
4
9f4591fee6ab64d9dd91f551355d29562bf663cb
https://github.com/Terminator8758/Precise-ICS-master/tree/9f4591fee6ab64d9dd91f551355d29562bf663cb
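A minimal parity sketch for the entry above, assuming a CUDA device and that SoftCrossEntropyLoss, SoftCrossEntropyLossNew, get_inputs, and get_init_inputs from the two code fields are importable together; the eager loss and the Inductor-compiled wrapper should agree to float32 tolerance:

import torch

def check_soft_ce_parity(atol=1e-6):
    # The compiled kernels are CUDA-only, so move the sample inputs first.
    inputs = [t.cuda() for t in get_inputs()]
    init_args, init_kwargs = get_init_inputs()
    eager = SoftCrossEntropyLoss(*init_args, **init_kwargs)
    compiled = SoftCrossEntropyLossNew(*init_args, **init_kwargs)
    ref = eager(*inputs)       # sum over dim 1, then mean: a scalar
    out = compiled(*inputs)    # fused Triton path, also a scalar
    assert torch.allclose(ref, out, atol=atol), (ref.item(), out.item())

check_soft_ce_parity()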
Normalize
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/lb/clbsy4tg5vy626xltvex3nvlz64bqgvmvspdm4ipee3ymrzipslk.py # Topologically Sorted Source Nodes: [pow_1, sum_1, norm, out], Original ATen: [aten.pow, aten.sum, aten.div] # Source node to ATen node mapping: # norm => pow_2 # out => div # pow_1 => pow_1 # sum_1 => sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %pow_2), kwargs = {}) triton_poi_fused_div_pow_sum_0 = async_compile.triton('triton_poi_fused_div_pow_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_pow_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused_div_pow_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tl.store(out_ptr0 + (x3), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, sum_1, norm, out], Original ATen: [aten.pow, aten.sum, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_pow_sum_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class Normalize(nn.Module): """ Ln normalization copied from https://github.com/salesforce/CoMatch """ def __init__(self, power=2): super(Normalize, self).__init__() self.power = power def forward(self, x): norm = x.pow(self.power).sum(1, keepdim=True).pow(1.0 / self.power) out = x.div(norm) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_pow_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tl.store(out_ptr0 + x3, tmp13, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_pow_sum_0[grid(256)](arg0_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 return buf0, class NormalizeNew(nn.Module): """ Ln normalization copied from https://github.com/salesforce/CoMatch """ def __init__(self, power=2): super(NormalizeNew, self).__init__() self.power = power def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
TencentYoutuResearch/Classification-SemiCLS
Normalize
false
17,989
[ "Apache-2.0" ]
4
ceb5546f8d8ba08e18de3b5d9426e6cda177e55e
https://github.com/TencentYoutuResearch/Classification-SemiCLS/tree/ceb5546f8d8ba08e18de3b5d9426e6cda177e55e
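A minimal equivalence sketch, assuming a CUDA device: with the default power=2 the fused kernel reduces to an L2 normalization over dim 1, so NormalizeNew should match torch.nn.functional.normalize:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4, device='cuda')
out_triton = NormalizeNew()(x)
# eps=0.0 divides by the raw L2 norm, matching the kernel (which has no
# epsilon guard); rand inputs keep the norm safely non-zero.
out_ref = F.normalize(x, p=2.0, dim=1, eps=0.0)
assert torch.allclose(out_triton, out_ref, atol=1e-6)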
SoftmaxAttention
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/5z/c5zz2lvkwvls35hi6w5oz64rjzzbgkdbdjxboowcjn73jj4mklp5.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_2/inductor_cache/hs/chsw2asddi47qklsfhzfp7peuoaze4fy5gufh2hyk6nd7s35ujp6.py # Topologically Sorted Source Nodes: [mul, result, result_1, sum_1], Original ATen: [aten.mul, aten._softmax, aten.sum] # Source node to ATen node mapping: # mul => mul # result => amax, div, exp, sub, sum_1 # result_1 => mul_1 # sum_1 => sum_2 # Graph fragment: # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %view_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [-1], True), kwargs = {}) triton_poi_fused__softmax_mul_sum_1 = async_compile.triton('triton_poi_fused__softmax_mul_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*(x0 // 4)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*(x0 // 4))), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*(x0 // 4))), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), 
xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*(x0 // 4))), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 * tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 * tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp16 / tmp25 tmp27 = tmp26 * tmp1 tmp28 = tmp18 / tmp25 tmp29 = tmp28 * tmp4 tmp30 = tmp27 + tmp29 tmp31 = tmp21 / tmp25 tmp32 = tmp31 * tmp8 tmp33 = tmp30 + tmp32 tmp34 = tmp24 / tmp25 tmp35 = tmp34 * tmp12 tmp36 = tmp33 + tmp35 tl.store(out_ptr0 + (x0), tmp14, xmask) tl.store(out_ptr1 + (x0), tmp25, xmask) tl.store(out_ptr2 + (x0), tmp36, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/wa/cwafgwdj3kecgnnj337juqqmqmsph5ryat5rv2cjbqlvj4ukkxuq.py # Topologically Sorted Source Nodes: [mul, result, result_1, add, result_2], Original ATen: [aten.mul, aten._softmax, aten.add, aten.div] # Source node to ATen node mapping: # add => add # mul => mul # result => amax, div, exp, sub # result_1 => mul_1 # result_2 => div_1 # Graph fragment: # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %view_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, 1e-13), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add), kwargs = {}) triton_poi_fused__softmax_add_div_mul_2 = async_compile.triton('triton_poi_fused__softmax_add_div_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_div_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': 
True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_div_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (4*(x1 // 4))), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tmp8 = tmp7 * tmp1 tmp10 = 1e-13 tmp11 = tmp9 + tmp10 tmp12 = tmp8 / tmp11 tl.store(out_ptr0 + (x2), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/3m/c3mht3icko3aphg4xlc2obupeoiwvus5ukzqe6w34n7bvwfl5ifj.py # Topologically Sorted Source Nodes: [contiguous_4, attended_premises], Original ATen: [aten.clone, aten.mul] # Source node to ATen node mapping: # attended_premises => mul_4 # contiguous_4 => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_2,), kwargs = {memory_format: torch.contiguous_format}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm_1, %clone_4), kwargs = {}) triton_poi_fused_clone_mul_3 = async_compile.triton('triton_poi_fused_clone_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_mul_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_mul_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + (x2), 
tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/3h/c3hgq2aig4njexzvh7ticllf65qt77ydtf732cfb2qnmmje3bjus.py # Topologically Sorted Source Nodes: [mul_2, result_3, result_4, sum_2], Original ATen: [aten.mul, aten._softmax, aten.sum] # Source node to ATen node mapping: # mul_2 => mul_2 # result_3 => amax_1, div_2, exp_1, sub_1, sum_3 # result_4 => mul_3 # sum_2 => sum_4 # Graph fragment: # %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %view_4), kwargs = {}) # %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_2, [-1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, %amax_1), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [-1], True), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_3), kwargs = {}) # %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, %view_4), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_3, [-1], True), kwargs = {}) triton_poi_fused__softmax_mul_sum_4 = async_compile.triton('triton_poi_fused__softmax_mul_sum_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_sum_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_mul_sum_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + ((16*(x0 // 4)) + (x0 % 4)), xmask) tmp1 = tl.load(in_ptr1 + (4*(x0 // 4)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + (16*(x0 // 4)) + (x0 % 4)), xmask) tmp4 = tl.load(in_ptr1 + (1 + (4*(x0 // 4))), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (8 + (16*(x0 // 4)) + (x0 % 4)), xmask) tmp8 = tl.load(in_ptr1 + (2 + (4*(x0 // 4))), xmask, 
eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (12 + (16*(x0 // 4)) + (x0 % 4)), xmask) tmp12 = tl.load(in_ptr1 + (3 + (4*(x0 // 4))), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 * tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 * tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp16 / tmp25 tmp27 = tmp26 * tmp1 tmp28 = tmp18 / tmp25 tmp29 = tmp28 * tmp4 tmp30 = tmp27 + tmp29 tmp31 = tmp21 / tmp25 tmp32 = tmp31 * tmp8 tmp33 = tmp30 + tmp32 tmp34 = tmp24 / tmp25 tmp35 = tmp34 * tmp12 tmp36 = tmp33 + tmp35 tl.store(out_ptr0 + (x0), tmp14, xmask) tl.store(out_ptr1 + (x0), tmp25, xmask) tl.store(out_ptr2 + (x0), tmp36, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/te/ctewcdmjat4njs4qh2ygkapy3akrx77wectfnx6siymtehehiwum.py # Topologically Sorted Source Nodes: [mul_2, result_3, result_4, add_1, result_5], Original ATen: [aten.mul, aten._softmax, aten.add, aten.div] # Source node to ATen node mapping: # add_1 => add_1 # mul_2 => mul_2 # result_3 => amax_1, div_2, exp_1, sub_1 # result_4 => mul_3 # result_5 => div_3 # Graph fragment: # %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %view_4), kwargs = {}) # %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_2, [-1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, %amax_1), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_3), kwargs = {}) # %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, %view_4), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_4, 1e-13), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_3, %add_1), kwargs = {}) triton_poi_fused__softmax_add_div_mul_5 = async_compile.triton('triton_poi_fused__softmax_add_div_mul_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_div_mul_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 
'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_div_mul_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + ((4*x1) + (16*(y0 // 4)) + (y0 % 4)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x1 + (4*(y0 // 4))), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (y0), ymask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (y0), ymask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr4 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tmp8 = tmp7 * tmp1 tmp10 = 1e-13 tmp11 = tmp9 + tmp10 tmp12 = tmp8 / tmp11 tl.store(out_ptr0 + (x1 + (4*y0)), tmp12, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) assert_size_stride(arg3_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(arg1_1, buf0, 64, grid=grid(64), stream=stream0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous, similarity_matrix], Original ATen: [aten.clone, aten.bmm] extern_kernels.bmm(arg0_1, buf0, out=buf1) buf2 = empty_strided_cuda((16, 1), (1, 16), torch.float32) buf3 = empty_strided_cuda((16, 1), (1, 16), torch.float32) buf4 = empty_strided_cuda((16, 1), (1, 16), torch.float32) # Topologically Sorted Source Nodes: [mul, result, result_1, sum_1], Original ATen: [aten.mul, aten._softmax, aten.sum] triton_poi_fused__softmax_mul_sum_1.run(buf1, arg2_1, buf2, buf3, buf4, 16, grid=grid(16), stream=stream0) buf5 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [mul, result, result_1, add, result_2], Original ATen: [aten.mul, aten._softmax, aten.add, aten.div] triton_poi_fused__softmax_add_div_mul_2.run(buf1, arg2_1, buf2, buf3, buf4, buf5, 64, grid=grid(64), stream=stream0) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [weighted_sum], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0), arg1_1, out=buf6) del arg1_1 buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [contiguous_4, attended_premises], Original ATen: [aten.clone, aten.mul] triton_poi_fused_clone_mul_3.run(buf7, arg3_1, 64, grid=grid(64), stream=stream0) 
buf8 = buf4; del buf4 # reuse buf9 = buf3; del buf3 # reuse buf10 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [mul_2, result_3, result_4, sum_2], Original ATen: [aten.mul, aten._softmax, aten.sum] triton_poi_fused__softmax_mul_sum_4.run(buf1, arg3_1, buf8, buf9, buf10, 16, grid=grid(16), stream=stream0) buf11 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [mul_2, result_3, result_4, add_1, result_5], Original ATen: [aten.mul, aten._softmax, aten.add, aten.div] triton_poi_fused__softmax_add_div_mul_5.run(buf1, arg3_1, buf8, buf9, buf10, buf11, 16, 4, grid=grid(16, 4), stream=stream0) del arg3_1 del buf10 del buf8 del buf9 buf12 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [weighted_sum_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0), arg0_1, out=buf12) del arg0_1 del buf11 buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [contiguous_5, attended_hypotheses], Original ATen: [aten.clone, aten.mul] triton_poi_fused_clone_mul_3.run(buf13, arg2_1, 64, grid=grid(64), stream=stream0) del arg2_1 return (buf7, buf13, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn def masked_softmax(tensor, mask): """ Apply a masked softmax on the last dimension of a tensor. The input tensor and mask should be of size (batch, *, sequence_length). Args: tensor: The tensor on which the softmax function must be applied along the last dimension. mask: A mask of the same size as the tensor with 0s in the positions of the values that must be masked and 1s everywhere else. Returns: A tensor of the same size as the inputs containing the result of the softmax. """ tensor_shape = tensor.size() reshaped_tensor = tensor.view(-1, tensor_shape[-1]) while mask.dim() < tensor.dim(): mask = mask.unsqueeze(1) mask = mask.expand_as(tensor).contiguous().float() reshaped_mask = mask.view(-1, mask.size()[-1]) result = nn.functional.softmax(reshaped_tensor * reshaped_mask, dim=-1) result = result * reshaped_mask result = result / (result.sum(dim=-1, keepdim=True) + 1e-13) return result.view(*tensor_shape) def weighted_sum(tensor, weights, mask): """ Apply a weighted sum on the vectors along the last dimension of 'tensor', and mask the vectors in the result with 'mask'. Args: tensor: A tensor of vectors on which a weighted sum must be applied. weights: The weights to use in the weighted sum. mask: A mask to apply on the result of the weighted sum. Returns: A new tensor containing the result of the weighted sum after the mask has been applied on it. """ weighted_sum = weights.bmm(tensor) while mask.dim() < weighted_sum.dim(): mask = mask.unsqueeze(1) mask = mask.transpose(-1, -2) mask = mask.expand_as(weighted_sum).contiguous().float() return weighted_sum * mask class SoftmaxAttention(nn.Module): """ Attention layer taking premises and hypotheses encoded by an RNN as input and computing the soft attention between their elements. The dot product of the encoded vectors in the premises and hypotheses is first computed. The softmax of the result is then used in a weighted sum of the vectors of the premises for each element of the hypotheses, and conversely for the elements of the premises. """ def forward(self, premise_batch, premise_mask, hypothesis_batch, hypothesis_mask): """ Args: premise_batch: A batch of sequences of vectors representing the premises in some NLI task. The batch is assumed to have the size (batch, sequences, vector_dim). premise_mask: A mask for the sequences in the premise batch, to ignore padding data in the sequences during the computation of the attention. hypothesis_batch: A batch of sequences of vectors representing the hypotheses in some NLI task. The batch is assumed to have the size (batch, sequences, vector_dim). hypothesis_mask: A mask for the sequences in the hypotheses batch, to ignore padding data in the sequences during the computation of the attention. Returns: attended_premises: The sequences of attention vectors for the premises in the input batch. attended_hypotheses: The sequences of attention vectors for the hypotheses in the input batch. """ similarity_matrix = premise_batch.bmm(hypothesis_batch.transpose(2, 1).contiguous()) prem_hyp_attn = masked_softmax(similarity_matrix, hypothesis_mask) hyp_prem_attn = masked_softmax(similarity_matrix.transpose(1, 2). 
            contiguous(), premise_mask)
        attended_premises = weighted_sum(hypothesis_batch, prem_hyp_attn,
            premise_mask)
        attended_hypotheses = weighted_sum(premise_batch, hyp_prem_attn,
            hypothesis_mask)
        return attended_premises, attended_hypotheses


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4]),
            torch.rand([4, 4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused__softmax_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * (x0 // 4), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * (x0 // 4)), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * (x0 // 4)), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * (x0 // 4)), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 * tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 * tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp16 / tmp25 tmp27 = tmp26 * tmp1 tmp28 = tmp18 / tmp25 tmp29 = tmp28 * tmp4 tmp30 = tmp27 + tmp29 tmp31 = tmp21 / tmp25 tmp32 = tmp31 * tmp8 tmp33 = tmp30 + tmp32 tmp34 = tmp24 / tmp25 tmp35 = tmp34 * tmp12 tmp36 = tmp33 + tmp35 tl.store(out_ptr0 + x0, tmp14, xmask) tl.store(out_ptr1 + x0, tmp25, xmask) tl.store(out_ptr2 + x0, tmp36, xmask) @triton.jit def triton_poi_fused__softmax_add_div_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * (x1 // 4)), xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tmp8 = tmp7 * tmp1 tmp10 = 1e-13 tmp11 = tmp9 + tmp10 tmp12 = tmp8 / tmp11 tl.store(out_ptr0 + x2, tmp12, xmask) @triton.jit def triton_poi_fused_clone_mul_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_mul_sum_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16 * (x0 // 4) + x0 % 4), xmask) tmp1 = tl.load(in_ptr1 + 4 * (x0 // 4), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (4 + 16 * (x0 // 4) + x0 % 4), xmask) tmp4 = tl.load(in_ptr1 + (1 + 4 * (x0 // 4)), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (8 + 16 * (x0 // 4) + x0 % 4), xmask) tmp8 = tl.load(in_ptr1 + (2 + 4 * (x0 // 4)), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (12 + 16 * (x0 // 4) + x0 % 4), xmask) tmp12 = tl.load(in_ptr1 + (3 + 4 * (x0 // 4)), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 * tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 * tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp16 / tmp25 tmp27 = tmp26 * tmp1 tmp28 = tmp18 / tmp25 tmp29 = tmp28 * tmp4 tmp30 = tmp27 + tmp29 tmp31 = tmp21 / tmp25 tmp32 = tmp31 * tmp8 tmp33 = tmp30 + tmp32 tmp34 = tmp24 / tmp25 tmp35 = tmp34 * tmp12 tmp36 = tmp33 + tmp35 tl.store(out_ptr0 + x0, tmp14, xmask) tl.store(out_ptr1 + x0, tmp25, xmask) tl.store(out_ptr2 + x0, tmp36, xmask) @triton.jit def triton_poi_fused__softmax_add_div_mul_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (4 * x1 + 16 * (y0 // 4) + y0 % 4), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x1 + 4 * (y0 // 4)), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + y0, ymask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + y0, ymask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr4 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tmp8 = tmp7 * tmp1 tmp10 = 1e-13 tmp11 = tmp9 + tmp10 tmp12 = tmp8 / tmp11 tl.store(out_ptr0 + (x1 + 4 * y0), tmp12, xmask & ymask) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) assert_size_stride(arg3_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4), (16, 
4, 1), torch.float32) extern_kernels.bmm(arg0_1, buf0, out=buf1) buf2 = empty_strided_cuda((16, 1), (1, 16), torch.float32) buf3 = empty_strided_cuda((16, 1), (1, 16), torch.float32) buf4 = empty_strided_cuda((16, 1), (1, 16), torch.float32) triton_poi_fused__softmax_mul_sum_1[grid(16)](buf1, arg2_1, buf2, buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0) del buf0 triton_poi_fused__softmax_add_div_mul_2[grid(64)](buf1, arg2_1, buf2, buf3, buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0), arg1_1, out=buf6) del arg1_1 buf7 = buf6 del buf6 triton_poi_fused_clone_mul_3[grid(64)](buf7, arg3_1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = buf4 del buf4 buf9 = buf3 del buf3 buf10 = buf2 del buf2 triton_poi_fused__softmax_mul_sum_4[grid(16)](buf1, arg3_1, buf8, buf9, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1) buf11 = buf5 del buf5 triton_poi_fused__softmax_add_div_mul_5[grid(16, 4)](buf1, arg3_1, buf8, buf9, buf10, buf11, 16, 4, XBLOCK=2, YBLOCK=16, num_warps =1, num_stages=1) del arg3_1 del buf10 del buf8 del buf9 buf12 = buf1 del buf1 extern_kernels.bmm(reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0), arg0_1, out=buf12) del arg0_1 del buf11 buf13 = buf12 del buf12 triton_poi_fused_clone_mul_3[grid(64)](buf13, arg2_1, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg2_1 return buf7, buf13 def masked_softmax(tensor, mask): """ Apply a masked softmax on the last dimension of a tensor. The input tensor and mask should be of size (batch, *, sequence_length). Args: tensor: The tensor on which the softmax function must be applied along the last dimension. mask: A mask of the same size as the tensor with 0s in the positions of the values that must be masked and 1s everywhere else. Returns: A tensor of the same size as the inputs containing the result of the softmax. """ tensor_shape = tensor.size() reshaped_tensor = tensor.view(-1, tensor_shape[-1]) while mask.dim() < tensor.dim(): mask = mask.unsqueeze(1) mask = mask.expand_as(tensor).contiguous().float() reshaped_mask = mask.view(-1, mask.size()[-1]) result = nn.functional.softmax(reshaped_tensor * reshaped_mask, dim=-1) result = result * reshaped_mask result = result / (result.sum(dim=-1, keepdim=True) + 1e-13) return result.view(*tensor_shape) def weighted_sum(tensor, weights, mask): """ Apply a weighted sum on the vectors along the last dimension of 'tensor', and mask the vectors in the result with 'mask'. Args: tensor: A tensor of vectors on which a weighted sum must be applied. weights: The weights to use in the weighted sum. mask: A mask to apply on the result of the weighted sum. Returns: A new tensor containing the result of the weighted sum after the mask has been applied on it. """ weighted_sum = weights.bmm(tensor) while mask.dim() < weighted_sum.dim(): mask = mask.unsqueeze(1) mask = mask.transpose(-1, -2) mask = mask.expand_as(weighted_sum).contiguous().float() return weighted_sum * mask class SoftmaxAttentionNew(nn.Module): """ Attention layer taking premises and hypotheses encoded by an RNN as input and computing the soft attention between their elements. The dot product of the encoded vectors in the premises and hypotheses is first computed. The softmax of the result is then used in a weighted sum of the vectors of the premises for each element of the hypotheses, and conversely for the elements of the premises. 
""" def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg2_1 = input_1 arg1_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0], output[1]
Taoooo9/Cail_Text_similarity_esimtribert
SoftmaxAttention
false
17,990
[ "Apache-2.0" ]
5
10b0314fdc3fcc60e39737ac563e8538b96ceb19
https://github.com/Taoooo9/Cail_Text_similarity_esimtribert/tree/10b0314fdc3fcc60e39737ac563e8538b96ceb19
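A small semantic sketch of masked_softmax, runnable on CPU and assuming the function is importable from the reference code field above: rather than filling masked logits with -inf, it multiplies the logits by the mask, zeroes the masked probabilities, and renormalizes with a 1e-13 stabilizer:

import torch

logits = torch.tensor([[2.0, 1.0, -3.0]])
mask = torch.tensor([[1.0, 1.0, 0.0]])
probs = masked_softmax(logits, mask)
# The masked slot is exactly zero and the surviving slots renormalize to ~1.
assert probs[0, 2] == 0.0
assert torch.allclose(probs.sum(dim=-1), torch.ones(1), atol=1e-6)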
Edg_Capture
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/hd/chdlrq3s6tekda24fpmwfqe34bsd3fznlfkwdsakaae76b2mbwsn.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution, %convolution_1, %convolution_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 3 x0 = xindex % 16 x2 = (xindex // 48) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 
tmp5 = tl.load(in_ptr0 + (x0 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + (16*x2)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr2 + (x0 + (16*x2)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + (x3), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (1, 1, 3, 3), (9, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x1_1], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1, 4, 4), (64, 0, 4, 1), 0), arg1_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1)) # Topologically Sorted Source Nodes: [x2_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1, 4, 4), (64, 0, 4, 1), 16), arg1_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1)) # Topologically Sorted Source Nodes: [x3_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1, 4, 4), (64, 0, 4, 1), 32), arg1_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 4, 4), (16, 16, 4, 1)) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(buf0, buf1, buf2, buf3, 192, grid=grid(192), stream=stream0) del buf0 del buf1 del buf2 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((1, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class Edg_Capture(nn.Module):

    def __init__(self):
        super(Edg_Capture, self).__init__()
        # 3x3 Laplacian-style edge-detection kernel, shared by all channels
        # and frozen (requires_grad=False).
        kernel = [[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]
        kernel = torch.FloatTensor(kernel).unsqueeze(0).unsqueeze(0)
        self.weight = nn.Parameter(data=kernel, requires_grad=False)

    def forward(self, x):
        # Convolve each of the first three channels with the same kernel,
        # then stack the per-channel edge maps back together.
        x1 = x[:, 0]
        x2 = x[:, 1]
        x3 = x[:, 2]
        x1 = F.conv2d(x1.unsqueeze(1), self.weight, padding=1)
        x2 = F.conv2d(x2.unsqueeze(1), self.weight, padding=1)
        x3 = F.conv2d(x3.unsqueeze(1), self.weight, padding=1)
        x = torch.cat([x1, x2, x3], dim=1)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
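Since the same frozen kernel is applied to each channel, the per-channel loop in forward is equivalent to a single grouped convolution. A quick equivalence check, assuming Edg_Capture from the listing above is in scope (the input shape and tolerance are chosen for illustration):

import torch
import torch.nn.functional as F

m = Edg_Capture()
x = torch.rand(4, 3, 8, 8)        # forward only reads channels 0..2
ref = m(x)
w = m.weight.repeat(3, 1, 1, 1)   # replicate the (1, 1, 3, 3) kernel -> (3, 1, 3, 3)
out = F.conv2d(x, w, padding=1, groups=3)
assert torch.allclose(ref, out, atol=1e-6)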
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
                           XBLOCK: tl.constexpr):
    xnumel = 192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 3  # output channel index (0..2)
    x0 = xindex % 16       # spatial position within one 4x4 map
    x2 = xindex // 48      # batch index
    x3 = xindex
    tmp0 = x1
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask,
                   eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 2, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr1 + (x0 + 16 * x2), tmp9 & xmask,
                    eviction_policy='evict_last', other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp14 = tl.load(in_ptr2 + (x0 + 16 * x2), tmp11 & xmask,
                    eviction_policy='evict_last', other=0.0)
    # Select which conv result feeds each output channel of the cat.
    tmp15 = tl.where(tmp9, tmp10, tmp14)
    tmp16 = tl.where(tmp4, tmp5, tmp15)
    tl.store(out_ptr0 + x3, tmp16, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (1, 1, 3, 3), (9, 9, 3, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Each convolution sees a single input channel: reinterpret_tensor
        # slices channel k of arg0_1 via storage offset 16 * k.
        buf0 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
            4, 4), (64, 0, 4, 1), 0), arg1_1, stride=(1, 1), padding=(1, 1),
            dilation=(1, 1), transposed=False, output_padding=(0, 0),
            groups=1, bias=None)
        assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
        buf1 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
            4, 4), (64, 0, 4, 1), 16), arg1_1, stride=(1, 1), padding=(1,
            1), dilation=(1, 1), transposed=False, output_padding=(0, 0),
            groups=1, bias=None)
        assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
        buf2 = extern_kernels.convolution(reinterpret_tensor(arg0_1, (4, 1,
            4, 4), (64, 0, 4, 1), 32), arg1_1, stride=(1, 1), padding=(1,
            1), dilation=(1, 1), transposed=False, output_padding=(0, 0),
            groups=1, bias=None)
        assert_size_stride(buf2, (4, 1, 4, 4), (16, 16, 4, 1))
        del arg0_1
        del arg1_1
        buf3 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(192)](buf0, buf1, buf2, buf3, 192,
            XBLOCK=256, num_warps=4, num_stages=1)
        del buf0
        del buf1
        del buf2
    return buf3,


class Edg_CaptureNew(nn.Module):

    def __init__(self):
        super(Edg_CaptureNew, self).__init__()
        kernel = [[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]
        kernel = torch.FloatTensor(kernel).unsqueeze(0).unsqueeze(0)
        self.weight = nn.Parameter(data=kernel, requires_grad=False)

    def forward(self, input_0):
        arg1_1 = self.weight
        arg0_1 = input_0
        output = call([arg0_1, arg1_1])
        return output[0]
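A sanity check that the compiled wrapper matches the eager module. This is a sketch, not part of the dump: it assumes both classes above are in scope and requires a CUDA device, since call() pins device 0 and asserts the exact (4, 4, 4, 4) input shape:

import torch

eager = Edg_Capture().cuda()
fused = Edg_CaptureNew().cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(eager(x), fused(x), atol=1e-6)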
TaoWangzj/PCFAN
Edg_Capture
false
17991
[ "MIT" ]
7
f6ddc8fd2e72a45431891acf0b25135499c84485
https://github.com/TaoWangzj/PCFAN/tree/f6ddc8fd2e72a45431891acf0b25135499c84485
Encoder
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/l7/cl7yxfbt4ykiq5jvwa5z2y7sq5svbia3hxdlhmplibtjjkzvzuil.py # Topologically Sorted Source Nodes: [mixture_w], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # mixture_w => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 
tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + (x0), tmp2, xmask) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 0), primals_2, stride=(2,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1), (4, 1, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.bool) # Topologically Sorted Source Nodes: [mixture_w], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, buf2, 16, grid=grid(16), stream=stream0) return (buf1, primals_2, reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
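The relu/threshold_backward fusion above stores, next to the activation, the boolean mask (relu(x) <= 0) that autograd later uses to zero gradients; that is what the extra torch.bool buffer (buf2) holds. A small eager-mode sketch of the relationship (illustrative shapes, not the dump's API):

import torch

x = torch.randn(4, 4, 1, requires_grad=True)
out = torch.relu(x)
mask = out <= 0                    # what the fused kernel writes to buf2
out.backward(torch.ones_like(out))
assert torch.equal(x.grad, (~mask).float())  # ReLU backward keeps grad only where out > 0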
import torch
import torch.nn as nn
import torch.nn.functional as F


class Encoder(nn.Module):
    """Estimation of the nonnegative mixture weight by a 1-D conv layer."""

    def __init__(self, L, N):
        super(Encoder, self).__init__()
        self.L, self.N = L, N
        self.conv1d_U = nn.Conv1d(1, N, kernel_size=L, stride=L // 2,
                                  bias=False)

    def forward(self, mixture):
        """
        Args:
            mixture: [M, T], M is batch size, T is #samples
        Returns:
            mixture_w: [M, N, K], where K = (T-L)/(L/2)+1 = 2T/L-1
        """
        mixture = torch.unsqueeze(mixture, 1)
        mixture_w = F.relu(self.conv1d_U(mixture))
        return mixture_w


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'L': 4, 'N': 4}]
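The docstring's output-length formula follows from the usual convolution arithmetic K = (T - L) / stride + 1 with stride = L / 2, which simplifies to 2T / L - 1. A quick shape check using the Encoder class above (T = 16 is chosen here for illustration):

import torch

L, N, T = 4, 4, 16
enc = Encoder(L, N)
mixture = torch.rand(4, T)
K = 2 * T // L - 1                 # = (T - L) // (L // 2) + 1 = 7
assert enc(mixture).shape == (4, N, K)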
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
                                               xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)  # ReLU, applied in place
    tmp3 = 0.0
    tmp4 = tmp2 <= tmp3                        # mask saved for the backward pass
    tl.store(in_out_ptr0 + x0, tmp2, xmask)
    tl.store(out_ptr0 + x0, tmp4, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 1, 4), (4, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
            1, 4), (4, 4, 1), 0), primals_2, stride=(2,), padding=(0,),
            dilation=(1,), transposed=False, output_padding=(0,), groups=1,
            bias=None)
        assert_size_stride(buf0, (4, 4, 1), (4, 1, 1))
        buf1 = buf0
        del buf0
        buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(16)](buf1, buf2,
            16, XBLOCK=16, num_warps=1, num_stages=1)
    return buf1, primals_2, reinterpret_tensor(primals_1, (4, 1, 4), (4, 4,
        1), 0), buf2


class EncoderNew(nn.Module):
    """Estimation of the nonnegative mixture weight by a 1-D conv layer."""

    def __init__(self, L, N):
        super(EncoderNew, self).__init__()
        self.L, self.N = L, N
        self.conv1d_U = nn.Conv1d(1, N, kernel_size=L, stride=L // 2,
                                  bias=False)

    def forward(self, input_0):
        primals_2 = self.conv1d_U.weight
        primals_1 = input_0
        output = call([primals_1, primals_2])
        return output[0]
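Unlike the eager module, the compiled graph is shape-specialized: call() asserts that the mixture is exactly (4, 4) and the weight is (4, 1, 4), i.e. L = N = 4. A hedged equivalence check (assumes both classes above are in scope and a CUDA device is available):

import torch

eager = Encoder(4, 4).cuda()
fused = EncoderNew(4, 4).cuda()
fused.conv1d_U.weight.data.copy_(eager.conv1d_U.weight.data)  # match random inits
x = torch.rand(4, 4, device='cuda')
assert torch.allclose(eager(x), fused(x), atol=1e-6)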
ThomasRigoni7/Audio-emotion-recognition-RAVDESS
Encoder
false
17992
[ "MIT" ]
5
ae44256edfcb320a32696444cd301264e1800866
https://github.com/ThomasRigoni7/Audio-emotion-recognition-RAVDESS/tree/ae44256edfcb320a32696444cd301264e1800866
GAT
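The record below compiles a GAT layer whose fused leaky_relu/mul/where/_softmax kernels implement a masked attention softmax. Reading the graph fragments: the raw scores pass through leaky_relu with negative slope 4 (the traced alpha), are scaled by the adjacency matrix, and entries where adj <= 0 are replaced with -9e15 (stored as its float32 rounding, -8999999815811072.0) before a row-wise softmax. A minimal eager sketch of that pattern, with illustrative names rather than the repo's API:

import torch
import torch.nn.functional as F

def masked_attention(e, adj, neg_slope=4.0):
    # e: raw attention logits [N, N]; adj: adjacency matrix [N, N]
    e = F.leaky_relu(e, negative_slope=neg_slope)
    scores = torch.where(adj > 0, e * adj, torch.full_like(e, -9e15))
    return F.softmax(scores, dim=1)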
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/gm/cgm5etysa63x5lzpynxjnthlpzhtfd5dibwrlzqlchsymr6n5ony.py # Topologically Sorted Source Nodes: [all_combinations_matrix], Original ATen: [aten.cat] # Source node to ATen node mapping: # all_combinations_matrix => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view, %repeat], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*(x1 // 4)) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr0 + ((4*(x1 % 4)) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/4r/c4r7qkyes5rbryciwaphgmxru6ck7iweqdesggebzgrgp5ryzwzx.py # Topologically Sorted Source Nodes: [e], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # e => gt # Graph fragment: # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%squeeze, 0), kwargs = {}) triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/7u/c7uk7o7qqvfjwv2b6fcqgitjir7t3xrokdjdtodslqdv3o6olgqi.py # Topologically Sorted Source Nodes: [e, zero_vec, gt, mul_1, attention, attention_1, e_1, mul_3, attention_3, attention_4, e_2, mul_5, attention_6, attention_7, e_3, mul_7, attention_9, attention_10], Original ATen: [aten.leaky_relu, aten.mul, aten.gt, aten.where, aten._softmax] # Source node to ATen node mapping: # attention => where_1 # attention_1 => amax, exp, sub, sum_1 # attention_10 => amax_3, exp_3, sub_3, sum_4 # attention_3 => where_4 # attention_4 => amax_1, exp_1, sub_1, sum_2 # attention_6 => where_7 # attention_7 => amax_2, exp_2, sub_2, sum_3 # attention_9 => where_10 # e => mul, where # e_1 => mul_6, where_3 # e_2 => mul_12, where_6 # e_3 => mul_18, where_9 # gt => gt_1 # mul_1 => mul_2 # mul_3 => mul_8 # mul_5 => mul_14 # mul_7 => mul_20 # zero_vec => full_default # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, 4), kwargs = {}) # %where : [num_users=1] = 
call_function[target=torch.ops.aten.where.self](args = (%gt, %squeeze, %mul), kwargs = {}) # %full_default : [num_users=5] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %gt_1 : [num_users=5] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_4, 0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, %primals_4), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_2, %full_default), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, 4), kwargs = {}) # %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %squeeze_1, %mul_6), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_3, %primals_4), kwargs = {}) # %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_8, %full_default), kwargs = {}) # %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_4, [1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_4, %amax_1), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {}) # %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_2, 4), kwargs = {}) # %where_6 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %squeeze_2, %mul_12), kwargs = {}) # %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_6, %primals_4), kwargs = {}) # %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_14, %full_default), kwargs = {}) # %amax_2 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_7, [1], True), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_7, %amax_2), kwargs = {}) # %exp_2 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [1], True), kwargs = {}) # %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_3, 4), kwargs = {}) # %where_9 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_9, %squeeze_3, %mul_18), kwargs = {}) # %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_9, %primals_4), kwargs = {}) # %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_20, %full_default), kwargs = {}) # %amax_3 : [num_users=1] = 
call_function[target=torch.ops.aten.amax.default](args = (%where_10, [1], True), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_10, %amax_3), kwargs = {}) # %exp_3 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_3, [1], True), kwargs = {}) triton_poi_fused__softmax_gt_leaky_relu_mul_where_2 = async_compile.triton('triton_poi_fused__softmax_gt_leaky_relu_mul_where_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: '*i1', 6: '*fp32', 7: '*i1', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_gt_leaky_relu_mul_where_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 36, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_gt_leaky_relu_mul_where_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1) tmp4 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp14 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp23 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp32 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp49 = 
tl.load(in_ptr3 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1) tmp50 = tl.load(in_ptr4 + (4*x0), xmask, eviction_policy='evict_last') tmp55 = tl.load(in_ptr3 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp56 = tl.load(in_ptr4 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp62 = tl.load(in_ptr3 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp63 = tl.load(in_ptr4 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp69 = tl.load(in_ptr3 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp70 = tl.load(in_ptr4 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp87 = tl.load(in_ptr5 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1) tmp88 = tl.load(in_ptr6 + (4*x0), xmask, eviction_policy='evict_last') tmp93 = tl.load(in_ptr5 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp94 = tl.load(in_ptr6 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp100 = tl.load(in_ptr5 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp101 = tl.load(in_ptr6 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp107 = tl.load(in_ptr5 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp108 = tl.load(in_ptr6 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp125 = tl.load(in_ptr7 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1) tmp126 = tl.load(in_ptr8 + (4*x0), xmask, eviction_policy='evict_last') tmp131 = tl.load(in_ptr7 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp132 = tl.load(in_ptr8 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp138 = tl.load(in_ptr7 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp139 = tl.load(in_ptr8 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp145 = tl.load(in_ptr7 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp146 = tl.load(in_ptr8 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = 4.0 tmp6 = tmp4 * tmp5 tmp7 = tl.where(tmp3, tmp4, tmp6) tmp8 = tmp7 * tmp0 tmp9 = -8999999815811072.0 tmp10 = tl.where(tmp2, tmp8, tmp9) tmp12 = tmp11 > tmp1 tmp15 = tmp14 * tmp5 tmp16 = tl.where(tmp13, tmp14, tmp15) tmp17 = tmp16 * tmp11 tmp18 = tl.where(tmp12, tmp17, tmp9) tmp19 = triton_helpers.maximum(tmp10, tmp18) tmp21 = tmp20 > tmp1 tmp24 = tmp23 * tmp5 tmp25 = tl.where(tmp22, tmp23, tmp24) tmp26 = tmp25 * tmp20 tmp27 = tl.where(tmp21, tmp26, tmp9) tmp28 = triton_helpers.maximum(tmp19, tmp27) tmp30 = tmp29 > tmp1 tmp33 = tmp32 * tmp5 tmp34 = tl.where(tmp31, tmp32, tmp33) tmp35 = tmp34 * tmp29 tmp36 = tl.where(tmp30, tmp35, tmp9) tmp37 = triton_helpers.maximum(tmp28, tmp36) tmp38 = tmp10 - tmp37 tmp39 = tl_math.exp(tmp38) tmp40 = tmp18 - tmp37 tmp41 = tl_math.exp(tmp40) tmp42 = tmp39 + tmp41 tmp43 = tmp27 - tmp37 tmp44 = tl_math.exp(tmp43) tmp45 = tmp42 + tmp44 tmp46 = tmp36 - tmp37 tmp47 = tl_math.exp(tmp46) tmp48 = tmp45 + tmp47 tmp51 = tmp50 * tmp5 tmp52 = tl.where(tmp49, tmp50, tmp51) tmp53 = tmp52 * tmp0 tmp54 = tl.where(tmp2, tmp53, tmp9) tmp57 = tmp56 * tmp5 tmp58 = tl.where(tmp55, tmp56, tmp57) tmp59 = tmp58 * tmp11 tmp60 = tl.where(tmp12, tmp59, tmp9) tmp61 = triton_helpers.maximum(tmp54, tmp60) tmp64 = tmp63 * tmp5 tmp65 = tl.where(tmp62, tmp63, tmp64) tmp66 = tmp65 * tmp20 tmp67 = tl.where(tmp21, tmp66, tmp9) tmp68 = triton_helpers.maximum(tmp61, tmp67) tmp71 = tmp70 * tmp5 tmp72 = tl.where(tmp69, tmp70, tmp71) tmp73 = tmp72 * tmp29 tmp74 = tl.where(tmp30, tmp73, tmp9) tmp75 = triton_helpers.maximum(tmp68, tmp74) 
tmp76 = tmp54 - tmp75 tmp77 = tl_math.exp(tmp76) tmp78 = tmp60 - tmp75 tmp79 = tl_math.exp(tmp78) tmp80 = tmp77 + tmp79 tmp81 = tmp67 - tmp75 tmp82 = tl_math.exp(tmp81) tmp83 = tmp80 + tmp82 tmp84 = tmp74 - tmp75 tmp85 = tl_math.exp(tmp84) tmp86 = tmp83 + tmp85 tmp89 = tmp88 * tmp5 tmp90 = tl.where(tmp87, tmp88, tmp89) tmp91 = tmp90 * tmp0 tmp92 = tl.where(tmp2, tmp91, tmp9) tmp95 = tmp94 * tmp5 tmp96 = tl.where(tmp93, tmp94, tmp95) tmp97 = tmp96 * tmp11 tmp98 = tl.where(tmp12, tmp97, tmp9) tmp99 = triton_helpers.maximum(tmp92, tmp98) tmp102 = tmp101 * tmp5 tmp103 = tl.where(tmp100, tmp101, tmp102) tmp104 = tmp103 * tmp20 tmp105 = tl.where(tmp21, tmp104, tmp9) tmp106 = triton_helpers.maximum(tmp99, tmp105) tmp109 = tmp108 * tmp5 tmp110 = tl.where(tmp107, tmp108, tmp109) tmp111 = tmp110 * tmp29 tmp112 = tl.where(tmp30, tmp111, tmp9) tmp113 = triton_helpers.maximum(tmp106, tmp112) tmp114 = tmp92 - tmp113 tmp115 = tl_math.exp(tmp114) tmp116 = tmp98 - tmp113 tmp117 = tl_math.exp(tmp116) tmp118 = tmp115 + tmp117 tmp119 = tmp105 - tmp113 tmp120 = tl_math.exp(tmp119) tmp121 = tmp118 + tmp120 tmp122 = tmp112 - tmp113 tmp123 = tl_math.exp(tmp122) tmp124 = tmp121 + tmp123 tmp127 = tmp126 * tmp5 tmp128 = tl.where(tmp125, tmp126, tmp127) tmp129 = tmp128 * tmp0 tmp130 = tl.where(tmp2, tmp129, tmp9) tmp133 = tmp132 * tmp5 tmp134 = tl.where(tmp131, tmp132, tmp133) tmp135 = tmp134 * tmp11 tmp136 = tl.where(tmp12, tmp135, tmp9) tmp137 = triton_helpers.maximum(tmp130, tmp136) tmp140 = tmp139 * tmp5 tmp141 = tl.where(tmp138, tmp139, tmp140) tmp142 = tmp141 * tmp20 tmp143 = tl.where(tmp21, tmp142, tmp9) tmp144 = triton_helpers.maximum(tmp137, tmp143) tmp147 = tmp146 * tmp5 tmp148 = tl.where(tmp145, tmp146, tmp147) tmp149 = tmp148 * tmp29 tmp150 = tl.where(tmp30, tmp149, tmp9) tmp151 = triton_helpers.maximum(tmp144, tmp150) tmp152 = tmp130 - tmp151 tmp153 = tl_math.exp(tmp152) tmp154 = tmp136 - tmp151 tmp155 = tl_math.exp(tmp154) tmp156 = tmp153 + tmp155 tmp157 = tmp143 - tmp151 tmp158 = tl_math.exp(tmp157) tmp159 = tmp156 + tmp158 tmp160 = tmp150 - tmp151 tmp161 = tl_math.exp(tmp160) tmp162 = tmp159 + tmp161 tl.store(out_ptr0 + (x0), tmp37, xmask) tl.store(out_ptr1 + (x0), tmp48, xmask) tl.store(out_ptr2 + (x0), tmp75, xmask) tl.store(out_ptr3 + (x0), tmp86, xmask) tl.store(out_ptr4 + (x0), tmp113, xmask) tl.store(out_ptr5 + (x0), tmp124, xmask) tl.store(out_ptr6 + (x0), tmp151, xmask) tl.store(out_ptr7 + (x0), tmp162, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/vh/cvh3omzpjpur2qzzbmbujlq35plaicyhlsjx6y5cttskus7nxnu4.py # Topologically Sorted Source Nodes: [e, zero_vec, gt, mul_1, attention, attention_1, e_1, mul_3, attention_3, attention_4, e_2, mul_5, attention_6, attention_7, e_3, mul_7, attention_9, attention_10], Original ATen: [aten.leaky_relu, aten.mul, aten.gt, aten.where, aten._softmax] # Source node to ATen node mapping: # attention => where_1 # attention_1 => div, exp, sub # attention_10 => div_3, exp_3, sub_3 # attention_3 => where_4 # attention_4 => div_1, exp_1, sub_1 # attention_6 => where_7 # attention_7 => div_2, exp_2, sub_2 # attention_9 => where_10 # e => mul, where # e_1 => mul_6, where_3 # e_2 => mul_12, where_6 # e_3 => mul_18, where_9 # gt => gt_1 # mul_1 => mul_2 # mul_3 => mul_8 # mul_5 => mul_14 # mul_7 => mul_20 # zero_vec => full_default # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, 4), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, 
%squeeze, %mul), kwargs = {}) # %full_default : [num_users=5] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %gt_1 : [num_users=5] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_4, 0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, %primals_4), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_2, %full_default), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, 4), kwargs = {}) # %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %squeeze_1, %mul_6), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_3, %primals_4), kwargs = {}) # %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_8, %full_default), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_4, %amax_1), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {}) # %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_2, 4), kwargs = {}) # %where_6 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %squeeze_2, %mul_12), kwargs = {}) # %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_6, %primals_4), kwargs = {}) # %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_14, %full_default), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_7, %amax_2), kwargs = {}) # %exp_2 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {}) # %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_2, %sum_3), kwargs = {}) # %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_3, 4), kwargs = {}) # %where_9 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_9, %squeeze_3, %mul_18), kwargs = {}) # %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_9, %primals_4), kwargs = {}) # %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_20, %full_default), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_10, %amax_3), kwargs = {}) # %exp_3 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {}) # %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_3, %sum_4), kwargs = {}) triton_poi_fused__softmax_gt_leaky_relu_mul_where_3 = async_compile.triton('triton_poi_fused__softmax_gt_leaky_relu_mul_where_3', ''' import triton import 
triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: '*fp32', 7: '*fp32', 8: '*i1', 9: '*fp32', 10: '*fp32', 11: '*i1', 12: '*fp32', 13: '*fp32', 14: '*i1', 15: '*fp32', 16: '*fp32', 17: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_gt_leaky_relu_mul_where_3', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1', 'in_out_ptr2', 'in_out_ptr3'], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_gt_leaky_relu_mul_where_3(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x2), xmask).to(tl.int1) tmp4 = tl.load(in_out_ptr0 + (x2), xmask) tmp11 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr4 + (x2), xmask).to(tl.int1) tmp17 = tl.load(in_out_ptr1 + (x2), xmask) tmp22 = tl.load(in_ptr5 + (x1), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + (x1), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr7 + (x2), xmask).to(tl.int1) tmp28 = tl.load(in_out_ptr2 + (x2), xmask) tmp33 = tl.load(in_ptr8 + (x1), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr9 + (x1), xmask, eviction_policy='evict_last') tmp38 = tl.load(in_ptr10 + (x2), xmask).to(tl.int1) tmp39 = tl.load(in_out_ptr3 + (x2), xmask) tmp44 = tl.load(in_ptr11 + (x1), xmask, eviction_policy='evict_last') tmp47 = tl.load(in_ptr12 + (x1), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = 4.0 tmp6 = tmp4 * tmp5 tmp7 = tl.where(tmp3, tmp4, tmp6) tmp8 = tmp7 * tmp0 tmp9 = -8999999815811072.0 tmp10 = tl.where(tmp2, tmp8, tmp9) tmp12 = tmp10 - tmp11 tmp13 = tl_math.exp(tmp12) tmp15 = tmp13 / tmp14 tmp18 = tmp17 * tmp5 tmp19 = tl.where(tmp16, tmp17, tmp18) tmp20 = tmp19 * tmp0 tmp21 = tl.where(tmp2, tmp20, tmp9) tmp23 = tmp21 - tmp22 tmp24 = tl_math.exp(tmp23) tmp26 = tmp24 / tmp25 tmp29 = tmp28 * tmp5 tmp30 = tl.where(tmp27, tmp28, tmp29) 
tmp31 = tmp30 * tmp0 tmp32 = tl.where(tmp2, tmp31, tmp9) tmp34 = tmp32 - tmp33 tmp35 = tl_math.exp(tmp34) tmp37 = tmp35 / tmp36 tmp40 = tmp39 * tmp5 tmp41 = tl.where(tmp38, tmp39, tmp40) tmp42 = tmp41 * tmp0 tmp43 = tl.where(tmp2, tmp42, tmp9) tmp45 = tmp43 - tmp44 tmp46 = tl_math.exp(tmp45) tmp48 = tmp46 / tmp47 tl.store(in_out_ptr0 + (x2), tmp15, xmask) tl.store(in_out_ptr1 + (x2), tmp26, xmask) tl.store(in_out_ptr2 + (x2), tmp37, xmask) tl.store(in_out_ptr3 + (x2), tmp48, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/hh/chh6w2ghklyzywtaoo3lcd3onberm3miuk3c3djkdig4mqokonfe.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # x_1 => cat_4 # Graph fragment: # %cat_4 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where_2, %where_5, %where_8, %where_11], 1), kwargs = {}) triton_poi_fused_cat_4 = async_compile.triton('triton_poi_fused_cat_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = 0.0 tmp7 = tmp5 > tmp6 tmp8 = 1.0 tmp9 = tmp5 * tmp8 tmp10 = libdevice.expm1(tmp9) tmp11 = tmp10 * tmp8 tmp12 = tl.where(tmp7, tmp9, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tmp16 = tl.full([1], 8, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tmp15 & tmp17 tmp19 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp18 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tmp19 > tmp6 tmp21 = tmp19 * tmp8 tmp22 = libdevice.expm1(tmp21) tmp23 = tmp22 * tmp8 tmp24 = tl.where(tmp20, tmp21, tmp23) tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp18, tmp24, tmp25) tmp27 = tmp0 >= 
tmp16 tmp28 = tl.full([1], 12, tl.int64) tmp29 = tmp0 < tmp28 tmp30 = tmp27 & tmp29 tmp31 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp30 & xmask, eviction_policy='evict_last', other=0.0) tmp32 = tmp31 > tmp6 tmp33 = tmp31 * tmp8 tmp34 = libdevice.expm1(tmp33) tmp35 = tmp34 * tmp8 tmp36 = tl.where(tmp32, tmp33, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp30, tmp36, tmp37) tmp39 = tmp0 >= tmp28 tmp40 = tl.full([1], 16, tl.int64) tmp41 = tmp0 < tmp40 tmp42 = tl.load(in_ptr3 + ((4*x1) + ((-12) + x0)), tmp39 & xmask, eviction_policy='evict_last', other=0.0) tmp43 = tmp42 > tmp6 tmp44 = tmp42 * tmp8 tmp45 = libdevice.expm1(tmp44) tmp46 = tmp45 * tmp8 tmp47 = tl.where(tmp43, tmp44, tmp46) tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype) tmp49 = tl.where(tmp39, tmp47, tmp48) tmp50 = tl.where(tmp30, tmp38, tmp49) tmp51 = tl.where(tmp18, tmp26, tmp50) tmp52 = tl.where(tmp4, tmp14, tmp51) tl.store(out_ptr0 + (x2), tmp52, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/7q/c7qxp5amlt573ebyc4vr5bdrcekznowi27nc3atyyhkvvfbeglaj.py # Topologically Sorted Source Nodes: [zero_vec, gt, e_4, mul_9, attention_12, attention_13], Original ATen: [aten.mul, aten.gt, aten.leaky_relu, aten.where, aten._softmax] # Source node to ATen node mapping: # attention_12 => where_13 # attention_13 => amax_4, exp_4, sub_4, sum_5 # e_4 => mul_24, where_12 # gt => gt_1 # mul_9 => mul_26 # zero_vec => full_default # Graph fragment: # %full_default : [num_users=5] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %gt_1 : [num_users=5] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_4, 0), kwargs = {}) # %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_4, 4), kwargs = {}) # %where_12 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_12, %squeeze_4, %mul_24), kwargs = {}) # %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_12, %primals_4), kwargs = {}) # %where_13 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_26, %full_default), kwargs = {}) # %amax_4 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_13, [1], True), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_13, %amax_4), kwargs = {}) # %exp_4 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_4,), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_4, [1], True), kwargs = {}) triton_poi_fused__softmax_gt_leaky_relu_mul_where_5 = async_compile.triton('triton_poi_fused__softmax_gt_leaky_relu_mul_where_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_gt_leaky_relu_mul_where_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_gt_leaky_relu_mul_where_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1) tmp4 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp14 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp23 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1) tmp32 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = 4.0 tmp6 = tmp4 * tmp5 tmp7 = tl.where(tmp3, tmp4, tmp6) tmp8 = tmp7 * tmp0 tmp9 = -8999999815811072.0 tmp10 = tl.where(tmp2, tmp8, tmp9) tmp12 = tmp11 > tmp1 tmp15 = tmp14 * tmp5 tmp16 = tl.where(tmp13, tmp14, tmp15) tmp17 = tmp16 * tmp11 tmp18 = tl.where(tmp12, tmp17, tmp9) tmp19 = triton_helpers.maximum(tmp10, tmp18) tmp21 = tmp20 > tmp1 tmp24 = tmp23 * tmp5 tmp25 = tl.where(tmp22, tmp23, tmp24) tmp26 = tmp25 * tmp20 tmp27 = tl.where(tmp21, tmp26, tmp9) tmp28 = triton_helpers.maximum(tmp19, tmp27) tmp30 = tmp29 > tmp1 tmp33 = tmp32 * tmp5 tmp34 = tl.where(tmp31, tmp32, tmp33) tmp35 = tmp34 * tmp29 tmp36 = tl.where(tmp30, tmp35, tmp9) tmp37 = triton_helpers.maximum(tmp28, tmp36) tmp38 = tmp10 - tmp37 tmp39 = tl_math.exp(tmp38) tmp40 = tmp18 - tmp37 tmp41 = tl_math.exp(tmp40) tmp42 = tmp39 + tmp41 tmp43 = tmp27 - tmp37 tmp44 = tl_math.exp(tmp43) tmp45 = tmp42 + tmp44 tmp46 = tmp36 - tmp37 tmp47 = tl_math.exp(tmp46) tmp48 = tmp45 + tmp47 tl.store(out_ptr0 + (x0), tmp37, xmask) tl.store(out_ptr1 + (x0), tmp48, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/64/c645hfatxnt35clupdl5n32m52nkqohijamnnr3ub6ty3mxgczz5.py # Topologically Sorted Source Nodes: [zero_vec, gt, e_4, mul_9, attention_12, attention_13], Original ATen: [aten.mul, aten.gt, aten.leaky_relu, aten.where, aten._softmax] # Source node to ATen node mapping: # attention_12 => where_13 # attention_13 => div_4, exp_4, sub_4 # e_4 => mul_24, where_12 # gt => gt_1 # mul_9 => mul_26 # zero_vec => 
full_default # Graph fragment: # %full_default : [num_users=5] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %gt_1 : [num_users=5] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_4, 0), kwargs = {}) # %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_4, 4), kwargs = {}) # %where_12 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_12, %squeeze_4, %mul_24), kwargs = {}) # %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_12, %primals_4), kwargs = {}) # %where_13 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_26, %full_default), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_13, %amax_4), kwargs = {}) # %exp_4 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_4,), kwargs = {}) # %div_4 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_4, %sum_5), kwargs = {}) triton_poi_fused__softmax_gt_leaky_relu_mul_where_6 = async_compile.triton('triton_poi_fused__softmax_gt_leaky_relu_mul_where_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_gt_leaky_relu_mul_where_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_gt_leaky_relu_mul_where_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x2), xmask).to(tl.int1) tmp4 = tl.load(in_out_ptr0 + (x2), xmask) tmp11 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = 4.0 tmp6 = tmp4 * tmp5 tmp7 = tl.where(tmp3, tmp4, tmp6) tmp8 = tmp7 * tmp0 tmp9 = -8999999815811072.0 tmp10 = tl.where(tmp2, tmp8, tmp9) tmp12 = tmp10 - tmp11 tmp13 = 
tl_math.exp(tmp12) tmp15 = tmp13 / tmp14 tl.store(in_out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/y4/cy4q6k3x34qlbumrxjyfnwv53krh74c3eepxbtkavm6j27mr4zsq.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.elu] # Source node to ATen node mapping: # x_3 => expm1_4, gt_14, mul_27, mul_29, where_14 # Graph fragment: # %gt_14 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mm_14, 0), kwargs = {}) # %mul_27 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_14, 1.0), kwargs = {}) # %expm1_4 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_27,), kwargs = {}) # %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_4, 1.0), kwargs = {}) # %where_14 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_14, %mul_27, %mul_29), kwargs = {}) triton_poi_fused_elu_7 = async_compile.triton('triton_poi_fused_elu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_elu_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/eb/cebgqpmvrrz56qwhrzp5oesln4zcvcgup4lijd6qafo3bb6q63wd.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => amax_5, sub_5 # Graph fragment: # %amax_5 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_14, [1], True), kwargs = {}) # %sub_5 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_14, %amax_5), kwargs = {}) triton_poi_fused__log_softmax_8 = async_compile.triton('triton_poi_fused__log_softmax_8', ''' import triton import triton.language as tl from triton.compiler.compiler 
import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_8(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/y5/cy56p2zpioq2nksbkizl4timhunod54nb52p2swp5hrox5lwhanf.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => exp_5, log, sub_6, sum_6 # Graph fragment: # %exp_5 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_5,), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_5, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_6,), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_5, %log), kwargs = {}) triton_poi_fused__log_softmax_9 = async_compile.triton('triton_poi_fused__log_softmax_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (8, 1), (1, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (8, 1), (1, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (8, 1), (1, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (8, 1), (1, 1)) assert_size_stride(primals_11, (16, 4), (4, 1)) assert_size_stride(primals_12, (8, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.mm] extern_kernels.mm(primals_1, primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [all_combinations_matrix], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(buf0, buf1, 128, grid=grid(128), stream=stream0) buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm] extern_kernels.mm(buf1, primals_3, out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [e], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_1.run(buf2, buf3, 16, grid=grid(16), stream=stream0) buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [hidden_1], Original ATen: [aten.mm] extern_kernels.mm(primals_1, primals_5, out=buf8) del primals_5 buf9 = empty_strided_cuda((16, 8), (8, 1), torch.float32) # Topologically Sorted 
Source Nodes: [all_combinations_matrix_1], Original ATen: [aten.cat] triton_poi_fused_cat_0.run(buf8, buf9, 128, grid=grid(128), stream=stream0) buf10 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.mm] extern_kernels.mm(buf9, primals_6, out=buf10) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [e_1], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_1.run(buf10, buf11, 16, grid=grid(16), stream=stream0) buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [hidden_2], Original ATen: [aten.mm] extern_kernels.mm(primals_1, primals_7, out=buf16) del primals_7 buf17 = empty_strided_cuda((16, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [all_combinations_matrix_2], Original ATen: [aten.cat] triton_poi_fused_cat_0.run(buf16, buf17, 128, grid=grid(128), stream=stream0) buf18 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.mm] extern_kernels.mm(buf17, primals_8, out=buf18) buf19 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [e_2], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_1.run(buf18, buf19, 16, grid=grid(16), stream=stream0) buf24 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [hidden_3], Original ATen: [aten.mm] extern_kernels.mm(primals_1, primals_9, out=buf24) del primals_9 buf25 = empty_strided_cuda((16, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [all_combinations_matrix_3], Original ATen: [aten.cat] triton_poi_fused_cat_0.run(buf24, buf25, 128, grid=grid(128), stream=stream0) buf26 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_6], Original ATen: [aten.mm] extern_kernels.mm(buf25, primals_10, out=buf26) buf27 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [e_3], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_1.run(buf26, buf27, 16, grid=grid(16), stream=stream0) buf4 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf12 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf13 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf20 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf21 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf28 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf29 = empty_strided_cuda((4, 1), (1, 4), torch.float32) # Topologically Sorted Source Nodes: [e, zero_vec, gt, mul_1, attention, attention_1, e_1, mul_3, attention_3, attention_4, e_2, mul_5, attention_6, attention_7, e_3, mul_7, attention_9, attention_10], Original ATen: [aten.leaky_relu, aten.mul, aten.gt, aten.where, aten._softmax] triton_poi_fused__softmax_gt_leaky_relu_mul_where_2.run(primals_4, buf3, buf2, buf11, buf10, buf19, buf18, buf27, buf26, buf4, buf5, buf12, buf13, buf20, buf21, buf28, buf29, 4, grid=grid(4), stream=stream0) buf6 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0); del buf2 # reuse buf14 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0); del buf10 # reuse buf22 = reinterpret_tensor(buf18, (4, 4), (4, 1), 0); del buf18 # reuse buf30 = reinterpret_tensor(buf26, (4, 4), (4, 1), 0); del buf26 # reuse # Topologically Sorted Source Nodes: [e, zero_vec, gt, mul_1, attention, attention_1, e_1, mul_3, attention_3, attention_4, e_2, mul_5, 
attention_6, attention_7, e_3, mul_7, attention_9, attention_10], Original ATen: [aten.leaky_relu, aten.mul, aten.gt, aten.where, aten._softmax] triton_poi_fused__softmax_gt_leaky_relu_mul_where_3.run(buf6, buf14, buf22, buf30, primals_4, buf3, buf4, buf5, buf11, buf12, buf13, buf19, buf20, buf21, buf27, buf28, buf29, 16, grid=grid(16), stream=stream0) del buf12 del buf13 del buf20 del buf21 del buf28 del buf29 buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [h_prime], Original ATen: [aten.mm] extern_kernels.mm(buf6, buf0, out=buf7) buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [h_prime_1], Original ATen: [aten.mm] extern_kernels.mm(buf14, buf8, out=buf15) buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [h_prime_2], Original ATen: [aten.mm] extern_kernels.mm(buf22, buf16, out=buf23) buf31 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [h_prime_3], Original ATen: [aten.mm] extern_kernels.mm(buf30, buf24, out=buf31) buf32 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat] triton_poi_fused_cat_4.run(buf7, buf15, buf23, buf31, buf32, 64, grid=grid(64), stream=stream0) buf33 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [hidden_4], Original ATen: [aten.mm] extern_kernels.mm(buf32, primals_11, out=buf33) buf34 = empty_strided_cuda((16, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [all_combinations_matrix_4], Original ATen: [aten.cat] triton_poi_fused_cat_0.run(buf33, buf34, 128, grid=grid(128), stream=stream0) buf35 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_8], Original ATen: [aten.mm] extern_kernels.mm(buf34, primals_12, out=buf35) buf36 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [e_4], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_1.run(buf35, buf36, 16, grid=grid(16), stream=stream0) buf37 = buf5; del buf5 # reuse buf38 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [zero_vec, gt, e_4, mul_9, attention_12, attention_13], Original ATen: [aten.mul, aten.gt, aten.leaky_relu, aten.where, aten._softmax] triton_poi_fused__softmax_gt_leaky_relu_mul_where_5.run(primals_4, buf36, buf35, buf37, buf38, 4, grid=grid(4), stream=stream0) buf39 = reinterpret_tensor(buf35, (4, 4), (4, 1), 0); del buf35 # reuse # Topologically Sorted Source Nodes: [zero_vec, gt, e_4, mul_9, attention_12, attention_13], Original ATen: [aten.mul, aten.gt, aten.leaky_relu, aten.where, aten._softmax] triton_poi_fused__softmax_gt_leaky_relu_mul_where_6.run(buf39, primals_4, buf36, buf37, buf38, 16, grid=grid(16), stream=stream0) del buf37 del buf38 buf40 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [h_prime_4], Original ATen: [aten.mm] extern_kernels.mm(buf39, buf33, out=buf40) buf41 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.elu] triton_poi_fused_elu_7.run(buf40, buf41, 16, grid=grid(16), stream=stream0) buf42 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_8.run(buf41, buf42, 16, grid=grid(16), stream=stream0) buf43 = empty_strided_cuda((4, 4), (4, 1), 
torch.float32) # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_9.run(buf42, buf43, 16, grid=grid(16), stream=stream0) del buf42 return (buf43, buf41, primals_4, buf3, buf6, buf7, buf11, buf14, buf15, buf19, buf22, buf23, buf27, buf30, buf31, buf36, buf39, buf40, buf43, reinterpret_tensor(buf33, (4, 4), (1, 4), 0), reinterpret_tensor(buf34, (8, 16), (1, 8), 0), reinterpret_tensor(primals_12, (1, 8), (1, 1), 0), reinterpret_tensor(buf32, (16, 4), (1, 16), 0), reinterpret_tensor(primals_11, (4, 16), (1, 4), 0), reinterpret_tensor(buf24, (4, 4), (1, 4), 0), reinterpret_tensor(buf25, (8, 16), (1, 8), 0), reinterpret_tensor(primals_10, (1, 8), (1, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(buf16, (4, 4), (1, 4), 0), reinterpret_tensor(buf17, (8, 16), (1, 8), 0), reinterpret_tensor(primals_8, (1, 8), (1, 1), 0), reinterpret_tensor(buf8, (4, 4), (1, 4), 0), reinterpret_tensor(buf9, (8, 16), (1, 8), 0), reinterpret_tensor(primals_6, (1, 8), (1, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), reinterpret_tensor(buf1, (8, 16), (1, 8), 0), reinterpret_tensor(primals_3, (1, 8), (1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
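For readability: the kernel pair triton_poi_fused__softmax_gt_leaky_relu_mul_where_5/_6 above fuses the masked attention softmax from GraphAttConv.forward (see the python_code that follows). A minimal eager-mode sketch of the same computation; the helper name is illustrative, and alpha=4.0 is taken from get_init_inputs():

import torch
import torch.nn.functional as F

def masked_attention_softmax(e, adj, alpha=4.0):
    # e, adj: (N, N) float tensors. LeakyReLU with negative slope alpha mirrors
    # the gt/where/mul pattern in the kernels (the precomputed e > 0 bool mask).
    e = torch.where(e > 0, e, e * alpha)
    # Non-edges are replaced by a large negative constant; the -9e15 literal of
    # the source rounds to the kernels' -8999999815811072.0 under float32.
    attention = torch.where(adj > 0, e * adj, torch.full_like(e, -9e15))
    # Kernel _5 produces the per-row max and sum of exponentials; kernel _6 then
    # evaluates exp(x - max) / sum, i.e. a row-wise numerically stable softmax.
    return F.softmax(attention, dim=1)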
import torch import torch.nn.functional as F import torch.autograd import torch.nn as nn class GraphAttConv(nn.Module): def __init__(self, in_features, out_features, dropout, alpha, concat=True): super(GraphAttConv, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.alpha = alpha self.concat = concat self.W = nn.Parameter(torch.empty(size=(in_features, out_features))) nn.init.xavier_uniform_(self.W.data, gain=1.414) self.attention_w = nn.Parameter(torch.empty(size=(2 * out_features, 1)) ) nn.init.xavier_uniform_(self.attention_w.data, gain=1.414) self.leakyrelu = nn.LeakyReLU(self.alpha) def forward(self, h, adj): hidden = torch.mm(h, self.W) attention_input = self._prepare_attentional_mechanism_input(hidden) e = self.leakyrelu(torch.matmul(attention_input, self.attention_w). squeeze(2)) zero_vec = -9000000000000000.0 * torch.ones_like(e) attention = torch.where(adj > 0, e * adj, zero_vec) attention = F.softmax(attention, dim=1) attention = F.dropout(attention, self.dropout, training=self.training) h_prime = torch.matmul(attention, hidden) if self.concat: return F.elu(h_prime) else: return h_prime def _prepare_attentional_mechanism_input(self, Wh): N = Wh.size(0) Wh_repeated_in_chunks = Wh.repeat_interleave(N, dim=0) Wh_repeated_alternating = Wh.repeat(N, 1) all_combinations_matrix = torch.cat([Wh_repeated_in_chunks, Wh_repeated_alternating], dim=1) return all_combinations_matrix.view(N, N, 2 * self.out_features) class GAT(nn.Module): def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads): """Dense version of GAT.""" super(GAT, self).__init__() self.dropout = dropout self.attentions = [GraphAttConv(nfeat, nhid, dropout=dropout, alpha =alpha, concat=True) for _ in range(nheads)] for i, attention in enumerate(self.attentions): self.add_module('attention_{}'.format(i), attention) self.out_att = GraphAttConv(nhid * nheads, nclass, dropout=dropout, alpha=alpha, concat=False) def forward(self, x, adj): x = F.dropout(x, self.dropout, training=self.training) x = torch.cat([att(x, adj) for att in self.attentions], dim=1) x = F.dropout(x, self.dropout, training=self.training) x = F.elu(self.out_att(x, adj)) return F.log_softmax(x, dim=1), x def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5, 'alpha': 4, 'nheads': 4}]
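A brief usage sketch wiring the eager module above to its own helpers; the assert is illustrative and only restates the shapes implied by get_inputs() and nclass=4:

model = GAT(nfeat=4, nhid=4, nclass=4, dropout=0.5, alpha=4, nheads=4)
features, adj = get_inputs()        # two (4, 4) float tensors
log_probs, embeddings = model(features, adj)
assert log_probs.shape == (4, 4)    # per-node log-probabilities over nclass=4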
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn.functional as F import torch.autograd import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * (x1 // 4) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr0 + (4 * (x1 % 4) + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_gt_leaky_relu_mul_where_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp14 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp23 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp32 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp49 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last').to( tl.int1) tmp50 = tl.load(in_ptr4 + 4 * x0, xmask, eviction_policy='evict_last') tmp55 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp56 = tl.load(in_ptr4 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp62 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp63 = tl.load(in_ptr4 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp69 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last' 
).to(tl.int1) tmp70 = tl.load(in_ptr4 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp87 = tl.load(in_ptr5 + 4 * x0, xmask, eviction_policy='evict_last').to( tl.int1) tmp88 = tl.load(in_ptr6 + 4 * x0, xmask, eviction_policy='evict_last') tmp93 = tl.load(in_ptr5 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp94 = tl.load(in_ptr6 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp100 = tl.load(in_ptr5 + (2 + 4 * x0), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp101 = tl.load(in_ptr6 + (2 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp107 = tl.load(in_ptr5 + (3 + 4 * x0), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp108 = tl.load(in_ptr6 + (3 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp125 = tl.load(in_ptr7 + 4 * x0, xmask, eviction_policy='evict_last').to( tl.int1) tmp126 = tl.load(in_ptr8 + 4 * x0, xmask, eviction_policy='evict_last') tmp131 = tl.load(in_ptr7 + (1 + 4 * x0), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp132 = tl.load(in_ptr8 + (1 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp138 = tl.load(in_ptr7 + (2 + 4 * x0), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp139 = tl.load(in_ptr8 + (2 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp145 = tl.load(in_ptr7 + (3 + 4 * x0), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp146 = tl.load(in_ptr8 + (3 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = 4.0 tmp6 = tmp4 * tmp5 tmp7 = tl.where(tmp3, tmp4, tmp6) tmp8 = tmp7 * tmp0 tmp9 = -8999999815811072.0 tmp10 = tl.where(tmp2, tmp8, tmp9) tmp12 = tmp11 > tmp1 tmp15 = tmp14 * tmp5 tmp16 = tl.where(tmp13, tmp14, tmp15) tmp17 = tmp16 * tmp11 tmp18 = tl.where(tmp12, tmp17, tmp9) tmp19 = triton_helpers.maximum(tmp10, tmp18) tmp21 = tmp20 > tmp1 tmp24 = tmp23 * tmp5 tmp25 = tl.where(tmp22, tmp23, tmp24) tmp26 = tmp25 * tmp20 tmp27 = tl.where(tmp21, tmp26, tmp9) tmp28 = triton_helpers.maximum(tmp19, tmp27) tmp30 = tmp29 > tmp1 tmp33 = tmp32 * tmp5 tmp34 = tl.where(tmp31, tmp32, tmp33) tmp35 = tmp34 * tmp29 tmp36 = tl.where(tmp30, tmp35, tmp9) tmp37 = triton_helpers.maximum(tmp28, tmp36) tmp38 = tmp10 - tmp37 tmp39 = tl_math.exp(tmp38) tmp40 = tmp18 - tmp37 tmp41 = tl_math.exp(tmp40) tmp42 = tmp39 + tmp41 tmp43 = tmp27 - tmp37 tmp44 = tl_math.exp(tmp43) tmp45 = tmp42 + tmp44 tmp46 = tmp36 - tmp37 tmp47 = tl_math.exp(tmp46) tmp48 = tmp45 + tmp47 tmp51 = tmp50 * tmp5 tmp52 = tl.where(tmp49, tmp50, tmp51) tmp53 = tmp52 * tmp0 tmp54 = tl.where(tmp2, tmp53, tmp9) tmp57 = tmp56 * tmp5 tmp58 = tl.where(tmp55, tmp56, tmp57) tmp59 = tmp58 * tmp11 tmp60 = tl.where(tmp12, tmp59, tmp9) tmp61 = triton_helpers.maximum(tmp54, tmp60) tmp64 = tmp63 * tmp5 tmp65 = tl.where(tmp62, tmp63, tmp64) tmp66 = tmp65 * tmp20 tmp67 = tl.where(tmp21, tmp66, tmp9) tmp68 = triton_helpers.maximum(tmp61, tmp67) tmp71 = tmp70 * tmp5 tmp72 = tl.where(tmp69, tmp70, tmp71) tmp73 = tmp72 * tmp29 tmp74 = tl.where(tmp30, tmp73, tmp9) tmp75 = triton_helpers.maximum(tmp68, tmp74) tmp76 = tmp54 - tmp75 tmp77 = tl_math.exp(tmp76) tmp78 = tmp60 - tmp75 tmp79 = tl_math.exp(tmp78) tmp80 = tmp77 + tmp79 tmp81 = tmp67 - tmp75 tmp82 = tl_math.exp(tmp81) tmp83 = tmp80 + tmp82 tmp84 = tmp74 - tmp75 tmp85 = tl_math.exp(tmp84) tmp86 = tmp83 + tmp85 tmp89 = tmp88 * tmp5 tmp90 = tl.where(tmp87, tmp88, tmp89) tmp91 = tmp90 * tmp0 tmp92 = tl.where(tmp2, tmp91, tmp9) tmp95 = tmp94 * tmp5 tmp96 = tl.where(tmp93, tmp94, tmp95) tmp97 = tmp96 * tmp11 tmp98 = tl.where(tmp12, tmp97, tmp9) tmp99 = triton_helpers.maximum(tmp92, 
tmp98) tmp102 = tmp101 * tmp5 tmp103 = tl.where(tmp100, tmp101, tmp102) tmp104 = tmp103 * tmp20 tmp105 = tl.where(tmp21, tmp104, tmp9) tmp106 = triton_helpers.maximum(tmp99, tmp105) tmp109 = tmp108 * tmp5 tmp110 = tl.where(tmp107, tmp108, tmp109) tmp111 = tmp110 * tmp29 tmp112 = tl.where(tmp30, tmp111, tmp9) tmp113 = triton_helpers.maximum(tmp106, tmp112) tmp114 = tmp92 - tmp113 tmp115 = tl_math.exp(tmp114) tmp116 = tmp98 - tmp113 tmp117 = tl_math.exp(tmp116) tmp118 = tmp115 + tmp117 tmp119 = tmp105 - tmp113 tmp120 = tl_math.exp(tmp119) tmp121 = tmp118 + tmp120 tmp122 = tmp112 - tmp113 tmp123 = tl_math.exp(tmp122) tmp124 = tmp121 + tmp123 tmp127 = tmp126 * tmp5 tmp128 = tl.where(tmp125, tmp126, tmp127) tmp129 = tmp128 * tmp0 tmp130 = tl.where(tmp2, tmp129, tmp9) tmp133 = tmp132 * tmp5 tmp134 = tl.where(tmp131, tmp132, tmp133) tmp135 = tmp134 * tmp11 tmp136 = tl.where(tmp12, tmp135, tmp9) tmp137 = triton_helpers.maximum(tmp130, tmp136) tmp140 = tmp139 * tmp5 tmp141 = tl.where(tmp138, tmp139, tmp140) tmp142 = tmp141 * tmp20 tmp143 = tl.where(tmp21, tmp142, tmp9) tmp144 = triton_helpers.maximum(tmp137, tmp143) tmp147 = tmp146 * tmp5 tmp148 = tl.where(tmp145, tmp146, tmp147) tmp149 = tmp148 * tmp29 tmp150 = tl.where(tmp30, tmp149, tmp9) tmp151 = triton_helpers.maximum(tmp144, tmp150) tmp152 = tmp130 - tmp151 tmp153 = tl_math.exp(tmp152) tmp154 = tmp136 - tmp151 tmp155 = tl_math.exp(tmp154) tmp156 = tmp153 + tmp155 tmp157 = tmp143 - tmp151 tmp158 = tl_math.exp(tmp157) tmp159 = tmp156 + tmp158 tmp160 = tmp150 - tmp151 tmp161 = tl_math.exp(tmp160) tmp162 = tmp159 + tmp161 tl.store(out_ptr0 + x0, tmp37, xmask) tl.store(out_ptr1 + x0, tmp48, xmask) tl.store(out_ptr2 + x0, tmp75, xmask) tl.store(out_ptr3 + x0, tmp86, xmask) tl.store(out_ptr4 + x0, tmp113, xmask) tl.store(out_ptr5 + x0, tmp124, xmask) tl.store(out_ptr6 + x0, tmp151, xmask) tl.store(out_ptr7 + x0, tmp162, xmask) @triton.jit def triton_poi_fused__softmax_gt_leaky_relu_mul_where_3(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x2, xmask).to(tl.int1) tmp4 = tl.load(in_out_ptr0 + x2, xmask) tmp11 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr4 + x2, xmask).to(tl.int1) tmp17 = tl.load(in_out_ptr1 + x2, xmask) tmp22 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr7 + x2, xmask).to(tl.int1) tmp28 = tl.load(in_out_ptr2 + x2, xmask) tmp33 = tl.load(in_ptr8 + x1, xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr9 + x1, xmask, eviction_policy='evict_last') tmp38 = tl.load(in_ptr10 + x2, xmask).to(tl.int1) tmp39 = tl.load(in_out_ptr3 + x2, xmask) tmp44 = tl.load(in_ptr11 + x1, xmask, eviction_policy='evict_last') tmp47 = tl.load(in_ptr12 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = 4.0 tmp6 = tmp4 * tmp5 tmp7 = tl.where(tmp3, tmp4, tmp6) tmp8 = tmp7 * tmp0 tmp9 = -8999999815811072.0 tmp10 = tl.where(tmp2, tmp8, tmp9) tmp12 = tmp10 - tmp11 tmp13 = tl_math.exp(tmp12) tmp15 = tmp13 / tmp14 tmp18 = tmp17 * tmp5 tmp19 = tl.where(tmp16, tmp17, tmp18) 
tmp20 = tmp19 * tmp0 tmp21 = tl.where(tmp2, tmp20, tmp9) tmp23 = tmp21 - tmp22 tmp24 = tl_math.exp(tmp23) tmp26 = tmp24 / tmp25 tmp29 = tmp28 * tmp5 tmp30 = tl.where(tmp27, tmp28, tmp29) tmp31 = tmp30 * tmp0 tmp32 = tl.where(tmp2, tmp31, tmp9) tmp34 = tmp32 - tmp33 tmp35 = tl_math.exp(tmp34) tmp37 = tmp35 / tmp36 tmp40 = tmp39 * tmp5 tmp41 = tl.where(tmp38, tmp39, tmp40) tmp42 = tmp41 * tmp0 tmp43 = tl.where(tmp2, tmp42, tmp9) tmp45 = tmp43 - tmp44 tmp46 = tl_math.exp(tmp45) tmp48 = tmp46 / tmp47 tl.store(in_out_ptr0 + x2, tmp15, xmask) tl.store(in_out_ptr1 + x2, tmp26, xmask) tl.store(in_out_ptr2 + x2, tmp37, xmask) tl.store(in_out_ptr3 + x2, tmp48, xmask) @triton.jit def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = 0.0 tmp7 = tmp5 > tmp6 tmp8 = 1.0 tmp9 = tmp5 * tmp8 tmp10 = libdevice.expm1(tmp9) tmp11 = tmp10 * tmp8 tmp12 = tl.where(tmp7, tmp9, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tmp16 = tl.full([1], 8, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tmp15 & tmp17 tmp19 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp18 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tmp19 > tmp6 tmp21 = tmp19 * tmp8 tmp22 = libdevice.expm1(tmp21) tmp23 = tmp22 * tmp8 tmp24 = tl.where(tmp20, tmp21, tmp23) tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp18, tmp24, tmp25) tmp27 = tmp0 >= tmp16 tmp28 = tl.full([1], 12, tl.int64) tmp29 = tmp0 < tmp28 tmp30 = tmp27 & tmp29 tmp31 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp30 & xmask, eviction_policy='evict_last', other=0.0) tmp32 = tmp31 > tmp6 tmp33 = tmp31 * tmp8 tmp34 = libdevice.expm1(tmp33) tmp35 = tmp34 * tmp8 tmp36 = tl.where(tmp32, tmp33, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp30, tmp36, tmp37) tmp39 = tmp0 >= tmp28 tl.full([1], 16, tl.int64) tmp42 = tl.load(in_ptr3 + (4 * x1 + (-12 + x0)), tmp39 & xmask, eviction_policy='evict_last', other=0.0) tmp43 = tmp42 > tmp6 tmp44 = tmp42 * tmp8 tmp45 = libdevice.expm1(tmp44) tmp46 = tmp45 * tmp8 tmp47 = tl.where(tmp43, tmp44, tmp46) tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype) tmp49 = tl.where(tmp39, tmp47, tmp48) tmp50 = tl.where(tmp30, tmp38, tmp49) tmp51 = tl.where(tmp18, tmp26, tmp50) tmp52 = tl.where(tmp4, tmp14, tmp51) tl.store(out_ptr0 + x2, tmp52, xmask) @triton.jit def triton_poi_fused__softmax_gt_leaky_relu_mul_where_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp14 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = 
tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp23 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp32 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = 4.0 tmp6 = tmp4 * tmp5 tmp7 = tl.where(tmp3, tmp4, tmp6) tmp8 = tmp7 * tmp0 tmp9 = -8999999815811072.0 tmp10 = tl.where(tmp2, tmp8, tmp9) tmp12 = tmp11 > tmp1 tmp15 = tmp14 * tmp5 tmp16 = tl.where(tmp13, tmp14, tmp15) tmp17 = tmp16 * tmp11 tmp18 = tl.where(tmp12, tmp17, tmp9) tmp19 = triton_helpers.maximum(tmp10, tmp18) tmp21 = tmp20 > tmp1 tmp24 = tmp23 * tmp5 tmp25 = tl.where(tmp22, tmp23, tmp24) tmp26 = tmp25 * tmp20 tmp27 = tl.where(tmp21, tmp26, tmp9) tmp28 = triton_helpers.maximum(tmp19, tmp27) tmp30 = tmp29 > tmp1 tmp33 = tmp32 * tmp5 tmp34 = tl.where(tmp31, tmp32, tmp33) tmp35 = tmp34 * tmp29 tmp36 = tl.where(tmp30, tmp35, tmp9) tmp37 = triton_helpers.maximum(tmp28, tmp36) tmp38 = tmp10 - tmp37 tmp39 = tl_math.exp(tmp38) tmp40 = tmp18 - tmp37 tmp41 = tl_math.exp(tmp40) tmp42 = tmp39 + tmp41 tmp43 = tmp27 - tmp37 tmp44 = tl_math.exp(tmp43) tmp45 = tmp42 + tmp44 tmp46 = tmp36 - tmp37 tmp47 = tl_math.exp(tmp46) tmp48 = tmp45 + tmp47 tl.store(out_ptr0 + x0, tmp37, xmask) tl.store(out_ptr1 + x0, tmp48, xmask) @triton.jit def triton_poi_fused__softmax_gt_leaky_relu_mul_where_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x2, xmask).to(tl.int1) tmp4 = tl.load(in_out_ptr0 + x2, xmask) tmp11 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = 4.0 tmp6 = tmp4 * tmp5 tmp7 = tl.where(tmp3, tmp4, tmp6) tmp8 = tmp7 * tmp0 tmp9 = -8999999815811072.0 tmp10 = tl.where(tmp2, tmp8, tmp9) tmp12 = tmp10 - tmp11 tmp13 = tl_math.exp(tmp12) tmp15 = tmp13 / tmp14 tl.store(in_out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_elu_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) @triton.jit def triton_poi_fused__log_softmax_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (8, 1), (1, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (8, 1), (1, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (8, 1), (1, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (8, 1), (1, 1)) assert_size_stride(primals_11, (16, 4), (4, 1)) assert_size_stride(primals_12, (8, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_2, out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](buf0, buf1, 128, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf1, primals_3, out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_5, out=buf8) del primals_5 buf9 = empty_strided_cuda((16, 8), (8, 1), torch.float32) triton_poi_fused_cat_0[grid(128)](buf8, buf9, 128, XBLOCK=128, num_warps=4, num_stages=1) buf10 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf9, primals_6, out=buf10) buf11 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(16)](buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_7, out=buf16) del primals_7 buf17 = empty_strided_cuda((16, 8), (8, 1), torch.float32) 
triton_poi_fused_cat_0[grid(128)](buf16, buf17, 128, XBLOCK=128, num_warps=4, num_stages=1) buf18 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf17, primals_8, out=buf18) buf19 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(16)](buf18, buf19, 16, XBLOCK=16, num_warps=1, num_stages=1) buf24 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_1, primals_9, out=buf24) del primals_9 buf25 = empty_strided_cuda((16, 8), (8, 1), torch.float32) triton_poi_fused_cat_0[grid(128)](buf24, buf25, 128, XBLOCK=128, num_warps=4, num_stages=1) buf26 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf25, primals_10, out=buf26) buf27 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(16)](buf26, buf27, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf12 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf13 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf20 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf21 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf28 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf29 = empty_strided_cuda((4, 1), (1, 4), torch.float32) triton_poi_fused__softmax_gt_leaky_relu_mul_where_2[grid(4)](primals_4, buf3, buf2, buf11, buf10, buf19, buf18, buf27, buf26, buf4, buf5, buf12, buf13, buf20, buf21, buf28, buf29, 4, XBLOCK=4, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0) del buf2 buf14 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0) del buf10 buf22 = reinterpret_tensor(buf18, (4, 4), (4, 1), 0) del buf18 buf30 = reinterpret_tensor(buf26, (4, 4), (4, 1), 0) del buf26 triton_poi_fused__softmax_gt_leaky_relu_mul_where_3[grid(16)](buf6, buf14, buf22, buf30, primals_4, buf3, buf4, buf5, buf11, buf12, buf13, buf19, buf20, buf21, buf27, buf28, buf29, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf12 del buf13 del buf20 del buf21 del buf28 del buf29 buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf6, buf0, out=buf7) buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf14, buf8, out=buf15) buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf22, buf16, out=buf23) buf31 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf30, buf24, out=buf31) buf32 = empty_strided_cuda((4, 16), (16, 1), torch.float32) triton_poi_fused_cat_4[grid(64)](buf7, buf15, buf23, buf31, buf32, 64, XBLOCK=64, num_warps=1, num_stages=1) buf33 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf32, primals_11, out=buf33) buf34 = empty_strided_cuda((16, 8), (8, 1), torch.float32) triton_poi_fused_cat_0[grid(128)](buf33, buf34, 128, XBLOCK=128, num_warps=4, num_stages=1) buf35 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf34, primals_12, out=buf35) buf36 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(16)](buf35, buf36, 16, XBLOCK=16, num_warps=1, num_stages=1) buf37 = buf5 del buf5 buf38 = buf4 del buf4 triton_poi_fused__softmax_gt_leaky_relu_mul_where_5[grid(4)](primals_4, buf36, buf35, buf37, buf38, 4, XBLOCK=4, num_warps=1, num_stages=1) buf39 = reinterpret_tensor(buf35, (4, 4), (4, 1), 0) del buf35 triton_poi_fused__softmax_gt_leaky_relu_mul_where_6[grid(16)](buf39, primals_4, buf36, buf37, buf38, 16, XBLOCK=16, num_warps=1, 
num_stages=1) del buf37 del buf38 buf40 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf39, buf33, out=buf40) buf41 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_elu_7[grid(16)](buf40, buf41, 16, XBLOCK=16, num_warps=1, num_stages=1) buf42 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__log_softmax_8[grid(16)](buf41, buf42, 16, XBLOCK= 16, num_warps=1, num_stages=1) buf43 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__log_softmax_9[grid(16)](buf42, buf43, 16, XBLOCK= 16, num_warps=1, num_stages=1) del buf42 return (buf43, buf41, primals_4, buf3, buf6, buf7, buf11, buf14, buf15, buf19, buf22, buf23, buf27, buf30, buf31, buf36, buf39, buf40, buf43, reinterpret_tensor(buf33, (4, 4), (1, 4), 0), reinterpret_tensor(buf34, (8, 16), (1, 8), 0), reinterpret_tensor( primals_12, (1, 8), (1, 1), 0), reinterpret_tensor(buf32, (16, 4), (1, 16), 0), reinterpret_tensor(primals_11, (4, 16), (1, 4), 0), reinterpret_tensor(buf24, (4, 4), (1, 4), 0), reinterpret_tensor( buf25, (8, 16), (1, 8), 0), reinterpret_tensor(primals_10, (1, 8), (1, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(buf16, (4, 4), (1, 4), 0), reinterpret_tensor( buf17, (8, 16), (1, 8), 0), reinterpret_tensor(primals_8, (1, 8), ( 1, 1), 0), reinterpret_tensor(buf8, (4, 4), (1, 4), 0), reinterpret_tensor(buf9, (8, 16), (1, 8), 0), reinterpret_tensor( primals_6, (1, 8), (1, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), reinterpret_tensor(buf1, (8, 16), (1, 8), 0), reinterpret_tensor(primals_3, (1, 8), (1, 1), 0)) class GraphAttConv(nn.Module): def __init__(self, in_features, out_features, dropout, alpha, concat=True): super(GraphAttConv, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.alpha = alpha self.concat = concat self.W = nn.Parameter(torch.empty(size=(in_features, out_features))) nn.init.xavier_uniform_(self.W.data, gain=1.414) self.attention_w = nn.Parameter(torch.empty(size=(2 * out_features, 1)) ) nn.init.xavier_uniform_(self.attention_w.data, gain=1.414) self.leakyrelu = nn.LeakyReLU(self.alpha) def forward(self, h, adj): hidden = torch.mm(h, self.W) attention_input = self._prepare_attentional_mechanism_input(hidden) e = self.leakyrelu(torch.matmul(attention_input, self.attention_w). 
squeeze(2)) zero_vec = -9000000000000000.0 * torch.ones_like(e) attention = torch.where(adj > 0, e * adj, zero_vec) attention = F.softmax(attention, dim=1) attention = F.dropout(attention, self.dropout, training=self.training) h_prime = torch.matmul(attention, hidden) if self.concat: return F.elu(h_prime) else: return h_prime def _prepare_attentional_mechanism_input(self, Wh): N = Wh.size(0) Wh_repeated_in_chunks = Wh.repeat_interleave(N, dim=0) Wh_repeated_alternating = Wh.repeat(N, 1) all_combinations_matrix = torch.cat([Wh_repeated_in_chunks, Wh_repeated_alternating], dim=1) return all_combinations_matrix.view(N, N, 2 * self.out_features) class GATNew(nn.Module): def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads): """Dense version of GAT.""" super(GATNew, self).__init__() self.dropout = dropout self.attentions = [GraphAttConv(nfeat, nhid, dropout=dropout, alpha =alpha, concat=True) for _ in range(nheads)] for i, attention in enumerate(self.attentions): self.add_module('attention_{}'.format(i), attention) self.out_att = GraphAttConv(nhid * nheads, nclass, dropout=dropout, alpha=alpha, concat=False) def forward(self, input_0, input_1): primals_1 = input_0 primals_4 = input_1 primals_2 = self.attention_0.W primals_3 = self.attention_0.attention_w primals_5 = self.attention_1.W primals_6 = self.attention_1.attention_w primals_7 = self.attention_2.W primals_8 = self.attention_2.attention_w primals_9 = self.attention_3.W primals_10 = self.attention_3.attention_w primals_11 = self.out_att.W primals_12 = self.out_att.attention_w output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0], output[1]
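A usage sketch for the compiled wrapper above, assuming a CUDA device (call() pins all buffers to cuda:0). The primals assignments in forward follow the roles call() gives them: primals_1 is the node-feature matrix consumed by each mm with a head's W, and primals_4 is the adjacency used in the masked softmax. eval() matters because the traced graph contains no dropout kernels, i.e. it corresponds to the training=False path:

model = GATNew(nfeat=4, nhid=4, nclass=4, dropout=0.5, alpha=4, nheads=4).cuda()
model.eval()
x = torch.rand(4, 4, device='cuda')    # node features, matching assert_size_stride
adj = torch.rand(4, 4, device='cuda')  # dense adjacency
log_probs, embeddings = model(x, adj)  # intended to match GAT.forward in eval mode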
SsGood/MMGL
GAT
false
17993
[ "MIT" ]
6
ea769e46fffb42559e764e2912c5b1dc17c10af2
https://github.com/SsGood/MMGL/tree/ea769e46fffb42559e764e2912c5b1dc17c10af2
SCANLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/cv/ccv6wuuk2f3t36jk6z6siizklj3kweutyi3yvvr2vh7sazv7nrd4.py # Topologically Sorted Source Nodes: [anchors_prob], Original ATen: [aten._softmax] # Source node to ATen node mapping: # anchors_prob => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < 
xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/yt/cytqs2smvhzqhhhv5nhgfsoz7g7pop2pi3eoeztc4dtuktnwv56m.py # Topologically Sorted Source Nodes: [anchors_prob], Original ATen: [aten._softmax] # Source node to ATen node mapping: # anchors_prob => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/l4/cl4gmejslpfrefblzb32mrp3a2hsnbvay2cugp4bbkngvugzynuu.py # Topologically Sorted Source Nodes: [consistency_loss, mean, x_, log, b, sum_1, entropy_loss, mul_1, total_loss], Original ATen: [aten.binary_cross_entropy, 
aten.mean, aten.clamp, aten.log, aten.mul, aten.sum, aten.neg, aten.sub] # Source node to ATen node mapping: # b => mul_2 # consistency_loss => full_default_1, full_default_2, full_default_3, log, log1p, maximum, maximum_1, mean, mul, neg, sub_3 # entropy_loss => neg_1 # log => log_1 # mean => mean_1 # mul_1 => mul_3 # sum_1 => sum_3 # total_loss => sub_4 # x_ => clamp_min # Graph fragment: # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%squeeze,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg,), kwargs = {}) # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p, %full_default_2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%full_default_1, %maximum), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%squeeze,), kwargs = {}) # %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log, %full_default_3), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %maximum_1), kwargs = {}) # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.default](args = (%sub_3,), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%div, [0]), kwargs = {}) # %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mean_1, 1e-08), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%clamp_min,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_min, %log_1), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_2,), kwargs = {}) # %neg_1 : [num_users=2] = call_function[target=torch.ops.aten.neg.default](args = (%sum_3,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg_1, 2.0), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mean, %mul_3), kwargs = {}) triton_per_fused_binary_cross_entropy_clamp_log_mean_mul_neg_sub_sum_2 = async_compile.triton('triton_per_fused_binary_cross_entropy_clamp_log_mean_mul_neg_sub_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 
'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=(5,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_clamp_log_mean_mul_neg_sub_sum_2', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_binary_cross_entropy_clamp_log_mean_mul_neg_sub_sum_2(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr0 + (4 + r0), None) tmp3 = tl.load(in_ptr0 + (8 + r0), None) tmp5 = tl.load(in_ptr0 + (12 + r0), None) tmp16 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = 1e-08 tmp10 = triton_helpers.maximum(tmp8, tmp9) tmp11 = tl_math.log(tmp10) tmp12 = tmp10 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.sum(tmp13, 1)[:, None] tmp17 = -tmp16 tmp18 = libdevice.log1p(tmp17) tmp19 = -100.0 tmp20 = triton_helpers.maximum(tmp18, tmp19) tmp21 = 0.0 tmp22 = tmp21 * tmp20 tmp23 = tl_math.log(tmp16) tmp24 = triton_helpers.maximum(tmp23, tmp19) tmp25 = tmp22 - tmp24 tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp28 = tl.sum(tmp26, 1)[:, None] tmp29 = tmp28 / tmp7 tmp30 = -tmp15 tmp31 = 2.0 tmp32 = tmp30 * tmp31 tmp33 = tmp29 - tmp32 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp29, None) tl.debug_barrier() tl.store(in_out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp30, None) tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp33, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4, 1), (4, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [anchors_prob], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [anchors_prob], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf0, buf1, 16, grid=grid(16), stream=stream0) buf2 = reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 16), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [positives_prob], Original ATen: [aten._softmax] triton_poi_fused__softmax_0.run(arg1_1, buf2, 16, grid=grid(16), 
stream=stream0) del arg1_1 buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [positives_prob], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 16, grid=grid(16), stream=stream0) del buf2 buf4 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [positives_prob, bmm], Original ATen: [aten._softmax, aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0), buf3, out=buf4) del buf3 buf7 = empty_strided_cuda((), (), torch.float32) buf5 = empty_strided_cuda((), (), torch.float32) buf6 = buf5; del buf5 # reuse buf8 = buf7; del buf7 # reuse buf9 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [consistency_loss, mean, x_, log, b, sum_1, entropy_loss, mul_1, total_loss], Original ATen: [aten.binary_cross_entropy, aten.mean, aten.clamp, aten.log, aten.mul, aten.sum, aten.neg, aten.sub] triton_per_fused_binary_cross_entropy_clamp_log_mean_mul_neg_sub_sum_2.run(buf6, buf8, buf1, buf4, buf9, 1, 4, grid=grid(1), stream=stream0) del buf1 del buf4 return (buf9, buf6, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
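A minimal eager sketch of what the fused reduction kernel above computes: with all-ones BCE targets, the (1 - y) * log1p(-s) branch is multiplied by 0.0 (tmp21/tmp22), so only -clamp(log s, -100) survives, while the entropy term runs over the clamped batch-mean probabilities and is scaled by the hard-coded weight 2.0 (tmp31). The function name and argument shapes below are illustrative, not part of the generated code.

import torch

def fused_scan_loss_reference(s, p, entropy_weight=2.0):
    # s: per-sample anchor/neighbor similarities in (0, 1], shape [b]
    # p: batch-mean class probabilities, shape [num_classes]
    consistency = (-torch.clamp(torch.log(s), min=-100.0)).mean()  # BCE(s, ones)
    p_ = torch.clamp(p, min=1e-08)                                 # tmp10
    entropy = -(p_ * torch.log(p_)).sum()                          # tmp30
    return consistency - entropy_weight * entropy, consistency, entropy  # buf9, buf6, buf8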
import torch from torch import nn import torch.nn.functional as F def entropy(x, input_as_probabilities): """ Helper function to compute the entropy over the batch input: batch w/ shape [b, num_classes] output: entropy value [is ideally -log(num_classes)] """ if input_as_probabilities: x_ = torch.clamp(x, min=1e-08) b = x_ * torch.log(x_) else: b = F.softmax(x, dim=1) * F.log_softmax(x, dim=1) if len(b.size()) == 2: return -b.sum(dim=1).mean() elif len(b.size()) == 1: return -b.sum() else: raise ValueError('Input tensor is %d-Dimensional' % len(b.size())) class SCANLoss(nn.Module): def __init__(self, entropy_weight=2.0): super(SCANLoss, self).__init__() self.softmax = nn.Softmax(dim=1) self.bce = nn.BCELoss() self.entropy_weight = entropy_weight def forward(self, anchors, neighbors): """ input: - anchors: logits for anchor images w/ shape [b, num_classes] - neighbors: logits for neighbor images w/ shape [b, num_classes] output: - Loss """ b, n = anchors.size() anchors_prob = self.softmax(anchors) positives_prob = self.softmax(neighbors) similarity = torch.bmm(anchors_prob.view(b, 1, n), positives_prob. view(b, n, 1)).squeeze() ones = torch.ones_like(similarity) consistency_loss = self.bce(similarity, ones) entropy_loss = entropy(torch.mean(anchors_prob, 0), input_as_probabilities=True) total_loss = consistency_loss - self.entropy_weight * entropy_loss return total_loss, consistency_loss, entropy_loss def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4, 1])] def get_init_inputs(): return [[], {}]
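A hedged usage sketch for the eager module above, reusing its own get_inputs(); note that for a uniform distribution the entropy helper returns log(num_classes), so the bracketed "-log(num_classes)" in the upstream docstring appears to drop a sign.

import torch

torch.manual_seed(0)
criterion = SCANLoss(entropy_weight=2.0)
anchors, neighbors = get_inputs()  # [4, 4] and [4, 4, 1] logits
total, consistency, ent = criterion(anchors, neighbors)
print(total.item(), consistency.item(), ent.item())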
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_per_fused_binary_cross_entropy_clamp_log_mean_mul_neg_sub_sum_2( in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr0 + (4 + r0), None) tmp3 = tl.load(in_ptr0 + (8 + r0), None) tmp5 = tl.load(in_ptr0 + (12 + r0), None) tmp16 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = 1e-08 tmp10 = triton_helpers.maximum(tmp8, tmp9) tmp11 = tl_math.log(tmp10) tmp12 = tmp10 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.sum(tmp13, 1)[:, None] tmp17 = -tmp16 tmp18 = libdevice.log1p(tmp17) tmp19 = -100.0 tmp20 = triton_helpers.maximum(tmp18, tmp19) tmp21 = 0.0 tmp22 = tmp21 * tmp20 tmp23 = tl_math.log(tmp16) tmp24 = triton_helpers.maximum(tmp23, tmp19) tmp25 = tmp22 - tmp24 tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp28 = tl.sum(tmp26, 1)[:, None] tmp29 = tmp28 / tmp7 tmp30 = -tmp15 tmp31 = 2.0 tmp32 = tmp30 * tmp31 tmp33 = tmp29 - tmp32 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp29, None) tl.debug_barrier() tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp30, 
None) tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp33, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4, 1), (4, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 16), 0) del buf0 triton_poi_fused__softmax_0[grid(16)](arg1_1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg1_1 buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf2 buf4 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0 ), buf3, out=buf4) del buf3 buf7 = empty_strided_cuda((), (), torch.float32) buf5 = empty_strided_cuda((), (), torch.float32) buf6 = buf5 del buf5 buf8 = buf7 del buf7 buf9 = empty_strided_cuda((), (), torch.float32) triton_per_fused_binary_cross_entropy_clamp_log_mean_mul_neg_sub_sum_2[ grid(1)](buf6, buf8, buf1, buf4, buf9, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf1 del buf4 return buf9, buf6, buf8 def entropy(x, input_as_probabilities): """ Helper function to compute the entropy over the batch input: batch w/ shape [b, num_classes] output: entropy value [is ideally -log(num_classes)] """ if input_as_probabilities: x_ = torch.clamp(x, min=1e-08) b = x_ * torch.log(x_) else: b = F.softmax(x, dim=1) * F.log_softmax(x, dim=1) if len(b.size()) == 2: return -b.sum(dim=1).mean() elif len(b.size()) == 1: return -b.sum() else: raise ValueError('Input tensor is %d-Dimensional' % len(b.size())) class SCANLossNew(nn.Module): def __init__(self, entropy_weight=2.0): super(SCANLossNew, self).__init__() self.softmax = nn.Softmax(dim=1) self.bce = nn.BCELoss() self.entropy_weight = entropy_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0], output[1], output[2]
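A sketch checking the compiled wrapper against the eager module, assuming SCANLoss and SCANLossNew above are importable together; CUDA is required because call() pins device 0.

import torch

if torch.cuda.is_available():
    anchors = torch.rand(4, 4, device='cuda')
    neighbors = torch.rand(4, 4, 1, device='cuda')
    for e, c in zip(SCANLoss()(anchors, neighbors), SCANLossNew()(anchors, neighbors)):
        assert torch.allclose(e, c, atol=1e-05), (e.item(), c.item())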
TencentYoutuResearch/ActiveLearning-SDM
SCANLoss
false
17994
[ "Apache-2.0" ]
4
0ee700e59451131536b7509ff3d4b266835ac01b
https://github.com/TencentYoutuResearch/ActiveLearning-SDM/tree/0ee700e59451131536b7509ff3d4b266835ac01b
Concat
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/s4/cs4pkkjggfcthm4kl2zulal3bwhdrqeesztyfumrxfwx4npi3rts.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %arg1_1],), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 64) x0 = xindex % 64 x2 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (64*x1)), tmp4 & xmask, other=0.0) tmp6 = 
tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (64*((-4) + x1))), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(arg0_1, arg1_1, buf0, 512, grid=grid(512), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
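The branchless select in triton_poi_fused_cat_0 above reads in_ptr0 when x1 = xindex // 64 is below 4 and in_ptr1 at offset x1 - 4 otherwise, which is exactly torch.cat along dim 0. A hedged eager mirror of that indexing (the clamps only keep the unused branch in bounds, since torch.where evaluates both sides):

import torch

x, y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
fx, fy = x.reshape(-1), y.reshape(-1)
idx = torch.arange(512)
x0, x1 = idx % 64, idx // 64
out = torch.where(x1 < 4,
                  fx[x0 + 64 * x1.clamp(max=3)],
                  fy[x0 + 64 * (x1 - 4).clamp(min=0)])
assert torch.equal(out.reshape(8, 4, 4, 4), torch.cat((x, y), 0))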
import torch import torch.nn import torch.utils.data import torch.utils.tensorboard._pytorch_graph import torch.onnx.symbolic_caffe2 class Concat(torch.nn.Module): """ Concat module for a functional concat""" def __init__(self, axis: 'int'=0): super(Concat, self).__init__() self.axis = axis def forward(self, x, y): """ Forward-pass routine for concat op """ return torch.cat((x, y), self.axis) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
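Usage sketch for the module above (axis defaults to 0, so two [4, 4, 4, 4] inputs concatenate into [8, 4, 4, 4]):

import torch

cat = Concat(axis=0)
out = cat(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
assert out.shape == (8, 4, 4, 4)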
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn import torch.utils.data import torch.utils.tensorboard._pytorch_graph import torch.onnx.symbolic_caffe2 assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 64 x0 = xindex % 64 x2 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 64 * (-4 + x1)), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((8, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](arg0_1, arg1_1, buf0, 512, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class ConcatNew(torch.nn.Module): """ Concat module for a functional concat""" def __init__(self, axis: 'int'=0): super(ConcatNew, self).__init__() self.axis = axis def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Rohan-Chaudhury/aimet
Concat
false
17995
[ "BSD-3-Clause" ]
3
1c38cac8cc0fd32dca40ce5e39940805d29f7a4a
https://github.com/Rohan-Chaudhury/aimet/tree/1c38cac8cc0fd32dca40ce5e39940805d29f7a4a
BasicBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/mk/cmkt25rjegcpcftx5fdfteyfr5hqwf3lke3iwc7nmp4syrkfnvnr.py # Topologically Sorted Source Nodes: [conv2d, out, add], Original ATen: [aten.convolution, aten.relu, aten.add, aten.threshold_backward] # Source node to ATen node mapping: # add => add # conv2d => convolution # out => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu, %relu), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_add_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_add_convolution_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_relu_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = tmp4 + tmp4 tmp6 = 0.0 tmp7 = tmp4 <= tmp6 tl.store(out_ptr0 + (x3), tmp5, xmask) tl.store(out_ptr1 + (x3), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d, out, add], Original ATen: [aten.convolution, aten.relu, aten.add, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_add_convolution_relu_threshold_backward_0.run(buf0, primals_2, buf1, buf2, 256, grid=grid(256), stream=stream0) del buf0 del primals_2 return (buf1, primals_1, primals_3, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
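With block_depth=1 the residual out1 aliases out, so the fused kernel above computes 2 * relu(conv(x) + bias) plus a threshold mask (tmp7) that the autograd backward reuses. A hedged eager equivalent of the two stores:

import torch
import torch.nn.functional as F

def fused_block_depth1(x, weight, bias):
    y = F.relu(F.conv2d(x, weight, bias, padding=1))
    return y + y, y <= 0  # buf1 and buf2 in call() above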
import torch import torch.nn.functional as F from torch import nn class BasicBlock(nn.Module): def __init__(self, input_dim, width, block_depth): super(BasicBlock, self).__init__() self.block_depth = block_depth self.conv1 = nn.Conv2d(input_dim, width, kernel_size=3, padding=1) if block_depth > 1: self.conv2 = nn.Conv2d(width, width, kernel_size=3, padding=1) if block_depth > 2: self.conv3 = nn.Conv2d(width, width, kernel_size=3, padding=1) if block_depth > 3: self.conv4 = nn.Conv2d(width, width, kernel_size=3, padding=1) if block_depth > 4: raise BaseException('block_depth > 4 is not implemented.') def forward(self, x): out = F.relu(self.conv1(x)) out1 = out if self.block_depth > 1: out = F.relu(self.conv2(out)) if self.block_depth > 2: out = F.relu(self.conv3(out)) if self.block_depth > 3: out = F.relu(self.conv4(out)) return out + out1 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'width': 4, 'block_depth': 1}]
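Usage sketch matching get_init_inputs() above; with block_depth=1 only conv1 runs and the returned sum is twice the post-ReLU activation. (Raising NotImplementedError would be more idiomatic than BaseException for the unsupported depths, but the upstream behavior is kept as-is here.)

import torch

block = BasicBlock(input_dim=4, width=4, block_depth=1)
out = block(torch.rand(4, 4, 4, 4))
assert out.shape == (4, 4, 4, 4)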
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_convolution_relu_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = tmp4 + tmp4 tmp6 = 0.0 tmp7 = tmp4 <= tmp6 tl.store(out_ptr0 + x3, tmp5, xmask) tl.store(out_ptr1 + x3, tmp7, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_add_convolution_relu_threshold_backward_0[grid(256)]( buf0, primals_2, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_2 return buf1, primals_1, primals_3, buf2 class BasicBlockNew(nn.Module): def __init__(self, input_dim, width, block_depth): super(BasicBlockNew, self).__init__() self.block_depth = block_depth self.conv1 = nn.Conv2d(input_dim, width, kernel_size=3, padding=1) if block_depth > 1: self.conv2 = nn.Conv2d(width, width, kernel_size=3, padding=1) if block_depth > 2: self.conv3 = nn.Conv2d(width, width, kernel_size=3, padding=1) if block_depth > 3: self.conv4 = nn.Conv2d(width, width, kernel_size=3, padding=1) if block_depth > 4: raise BaseException('block_depth > 4 is not implemented.') def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
TomHeaven/Pixel-wise-Estimation-of-Signal-Dependent-Image-Noise-using-Deep-Residual-Learning
BasicBlock
false
17996
[ "MIT" ]
10
7f2a57312f7cec76e5d7016825f75ee9bbd170f5
https://github.com/TomHeaven/Pixel-wise-Estimation-of-Signal-Dependent-Image-Noise-using-Deep-Residual-Learning/tree/7f2a57312f7cec76e5d7016825f75ee9bbd170f5
KLDivLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/kq/ckqglep2uxxmtpvso6erxkbhcljhsx4d2chzqc2ps72gtorqejiz.py # Topologically Sorted Source Nodes: [targets_], Original ATen: [aten._softmax] # Source node to ATen node mapping: # targets_ => amax_1, exp_1, sub_2 # Graph fragment: # %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax_1), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = 
xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/im/cimjqfyrnbi2v4eg3hbj3b2rzjznianhv7isimhj2shrdgg2krgd.py # Topologically Sorted Source Nodes: [preds_], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # preds_ => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_2/inductor_cache/ob/cobdg6tfk3bpsjjj5cgcbws6re74kkct5oomao3swhrjcnqnxpd2.py # Topologically Sorted Source Nodes: [targets_, kl_div, preds_], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub] # Source node to ATen node mapping: # kl_div => eq, full_default, full_default_1, isnan, log_1, mul, mul_1, sub_3, where, where_1 # preds_ => exp, log, sub_1, sum_1 # targets_ => div, sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {}) # %div : [num_users=5] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {}) # %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%div,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%div, 0), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %log_1), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul_1), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default_1, %where), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %sub_1), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %mul), kwargs = {}) triton_poi_fused__log_softmax__softmax_mul_sub_xlogy_2 = async_compile.triton('triton_poi_fused__log_softmax__softmax_mul_sub_xlogy_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax__softmax_mul_sub_xlogy_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 
'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax__softmax_mul_sub_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (x3), xmask) tmp18 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float("nan") tmp16 = tl.where(tmp9, tmp15, tmp14) tmp19 = tl_math.exp(tmp18) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tmp31 = tmp8 * tmp30 tmp32 = tmp16 - tmp31 tl.store(in_out_ptr0 + (x3), tmp32, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [targets_], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [preds_], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_1.run(arg0_1, buf2, 256, grid=grid(256), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [targets_, kl_div, preds_], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub] triton_poi_fused__log_softmax__softmax_mul_sub_xlogy_2.run(buf3, buf0, buf2, 256, grid=grid(256), stream=stream0) del buf0 del buf2 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 
= rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
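The final fused kernel above is pointwise KL divergence with reduction='none' and xlogy semantics: kl = t * log(t) - t * log_p, where 0 * log(0) is defined as 0 and NaN targets propagate (tmp9 through tmp16). A hedged eager mirror:

import torch

def kl_pointwise(log_p, t):
    # log_p: log-probabilities (log_softmax of preds); t: probabilities
    return torch.xlogy(t, t) - t * log_p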
import torch class KLDivLoss(torch.nn.KLDivLoss): def __init__(self, reduction='none'): super().__init__(reduction=reduction) def forward(self, preds, targets): """ Applies ``log_softmax`` to ``pred`` and ``softmax`` to ``targets`` prior to computing KL-Divergence loss. These operations are performed due to the requirements of the PyTorch API for KLDivLoss. """ preds_ = torch.nn.functional.log_softmax(preds, dim=1) targets_ = torch.nn.functional.softmax(targets, dim=1) return super(KLDivLoss, self).forward(preds_, targets_) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
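Usage sketch for the wrapper above: both arguments are raw logits, the subclass applies log_softmax/softmax over dim=1 as the PyTorch KLDivLoss API requires, and reduction='none' keeps the per-element map:

import torch

criterion = KLDivLoss()
preds, targets = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
kl = criterion(preds, targets)
assert kl.shape == (4, 4, 4, 4)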
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax__softmax_mul_sub_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr1 + x3, xmask) tmp18 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp26 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float('nan') tmp16 = tl.where(tmp9, tmp15, tmp14) tmp19 = tl_math.exp(tmp18) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = 
tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tmp31 = tmp8 * tmp30 tmp32 = tmp16 - tmp31 tl.store(in_out_ptr0 + x3, tmp32, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax_1[grid(256)](arg0_1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = buf1 del buf1 triton_poi_fused__log_softmax__softmax_mul_sub_xlogy_2[grid(256)](buf3, buf0, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del buf2 return buf3, class KLDivLossNew(torch.nn.KLDivLoss): def __init__(self, reduction='none'): super().__init__(reduction=reduction) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Thesys-lab/learned-coded-computation
KLDivLoss
false
17997
[ "Apache-2.0" ]
8
c5c32bcfb7cc4a9f52079f648373e6972c19eff9
https://github.com/Thesys-lab/learned-coded-computation/tree/c5c32bcfb7cc4a9f52079f648373e6972c19eff9
CharbonnierLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/ld/cldtt4ebftddfj4rfkhdg5qms3hjdmulchilkr5fnpjj7n4w67nv.py # Topologically Sorted Source Nodes: [diff, mul, add, sqrt, loss], Original ATen: [aten.sub, aten.mul, aten.add, aten.sqrt, aten.sum] # Source node to ATen node mapping: # add => add # diff => sub # loss => sum_1 # mul => mul # sqrt => sqrt # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %sub), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1e-06), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sqrt,), kwargs = {}) triton_per_fused_add_mul_sqrt_sub_sum_0 = async_compile.triton('triton_per_fused_add_mul_sqrt_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_sqrt_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mul_sqrt_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = 1e-06 tmp5 = tmp3 + tmp4 tmp6 = libdevice.sqrt(tmp5) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp9, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [diff, mul, add, sqrt, loss], Original ATen: [aten.sub, aten.mul, aten.add, aten.sqrt, aten.sum] stream0 = get_raw_stream(0) triton_per_fused_add_mul_sqrt_sub_sum_0.run(arg0_1, arg1_1, buf0, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
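For reference, the fused sub/mul/add/sqrt/sum kernel above evaluates the Charbonnier loss in a single pass over the flattened 256-element inputs; in the notation of the eager module below it computes

$$\mathcal{L}(x, y) = \sum_{i} \sqrt{(x_i - y_i)^2 + \varepsilon}, \qquad \varepsilon = 10^{-6}.$$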
import torch import torch.utils.data import torch.nn as nn class CharbonnierLoss(nn.Module): """Charbonnier Loss (L1)""" def __init__(self, eps=1e-06): super(CharbonnierLoss, self).__init__() self.eps = eps def forward(self, x, y): diff = x - y loss = torch.sum(torch.sqrt(diff * diff + self.eps)) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
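A minimal usage sketch of the eager-mode module above. The input shapes follow get_inputs() in the source; the variable names (criterion, x, y) are illustrative, not part of the record:

    import torch

    # Instantiate the eager-mode loss defined above; eps=1e-06 matches its default.
    criterion = CharbonnierLoss(eps=1e-06)

    # Shapes taken from get_inputs() in the source.
    x = torch.rand(4, 4, 4, 4)
    y = torch.rand(4, 4, 4, 4)

    loss = criterion(x, y)
    # The result is a scalar equal to sum(sqrt((x - y)**2 + eps)).
    assert torch.allclose(loss, torch.sqrt((x - y) ** 2 + 1e-06).sum())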
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_mul_sqrt_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = 1e-06 tmp5 = tmp3 + tmp4 tmp6 = libdevice.sqrt(tmp5) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_add_mul_sqrt_sub_sum_0[grid(1)](arg0_1, arg1_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf0, class CharbonnierLossNew(nn.Module): """Charbonnier Loss (L1)""" def __init__(self, eps=1e-06): super(CharbonnierLossNew, self).__init__() self.eps = eps def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
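A hedged sanity check comparing the Triton-backed CharbonnierLossNew above against the eager CharbonnierLoss from the previous listing. This assumes both classes are importable in one scope and that a CUDA device is available, since call() asserts contiguous (4, 4, 4, 4) float32 CUDA tensors via assert_size_stride:

    import torch

    # The compiled path only accepts contiguous (4, 4, 4, 4) float32 CUDA
    # tensors, per the assert_size_stride guards in call().
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = torch.rand(4, 4, 4, 4, device='cuda')

    eager = CharbonnierLoss()(x, y)
    fused = CharbonnierLossNew()(x, y)

    # Both should agree up to floating-point reduction order.
    assert torch.allclose(eager, fused, rtol=1e-5, atol=1e-5)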
TevenLeScao/BasicSR
CharbonnierLoss
false
17998
[ "Apache-2.0" ]
4
1a7bd8754de00f3a9c9f2031acfc447350459ea0
https://github.com/TevenLeScao/BasicSR/tree/1a7bd8754de00f3a9c9f2031acfc447350459ea0
ResNetV2
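The ResNetV2 graph below repeatedly standardizes each convolution's weights before the conv itself: the fused var_mean/sub/add/sqrt/div kernels compute per-output-channel statistics over dims [1, 2, 3] with correction=0 and an eps of 1e-05. A minimal eager-mode sketch of that pattern, assuming a BiT-style weight-standardized conv (the class name StdConv2d is illustrative, not taken from this record):

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class StdConv2d(nn.Conv2d):
        def forward(self, x):
            w = self.weight
            # Per-output-channel statistics over (in_channels, kH, kW), matching
            # aten.var_mean.correction(..., [1, 2, 3], correction=0, keepdim=True).
            var, mean = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
            # Standardize the weights; 1e-5 matches the add(..., 1e-05) in the graph.
            w = (w - mean) / torch.sqrt(var + 1e-5)
            return F.conv2d(x, w, self.bias, self.stride, self.padding,
                            self.dilation, self.groups)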
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/hz/chz262223ztmcitbymyotiqsvjr3l6gp5x6k63aubvaetfwq5c4x.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024, 64], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 768 xnumel = 49 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (49*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (147*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_2/inductor_cache/az/cazvf33aclbntgyixs3zlm6bdzs672xtry2xl6pc3l3sjzwrnks5.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 12 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/sf/csfbo2wg4qzrcculqiz2kklr2dwz3q6kpztuuavooookip4gpwcb.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 65536 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ee/ceez5v32pmp6hauztc7szgqwvpdnusf5mgc6cf6rinvmyyc4ppy3.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 262144 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = (yindex // 512) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (512*x2) + (4608*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/kv/ckvv3mgfwbew7lwg7tli4hrvc6wkf3gcuiopfq6uzo2jflit5kjb.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node 
mapping: triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 1048576 xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 1024 y1 = (yindex // 1024) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (1024*x2) + (9216*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/tq/ctq2x6mrawngngeszzndggwfd2bz7lxnkcgwy3eookkvcgsmbmlm.py # Topologically Sorted Source Nodes: [var_mean, sub, add, sqrt, w], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add => add # sqrt => sqrt # sub => sub # var_mean => var_mean # w => div # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {}) triton_per_fused_add_div_sqrt_sub_var_mean_5 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[256, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_5(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 256 rnumel = 147 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (147*x0)), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask & xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 147, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(rmask & xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 147.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.sqrt(tmp20) tmp22 = tmp0 - tmp10 tmp23 = tmp22 / tmp21 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp21, xmask) tl.store(out_ptr1 + (r1 + (147*x0)), tmp23, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/m3/cm3n5ekaortbl77idbui2xmhlmtcsqlrv4oaeruokxpvsuvsvhqo.py # Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # input_2 => add_1, rsqrt, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-06), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {}) triton_red_fused_native_group_norm_6 = async_compile.triton('triton_red_fused_native_group_norm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( 
size_hints=[128, 8192], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_native_group_norm_6(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 128 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = (xindex // 32) tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 8 r3 = (rindex // 8) tmp0 = tl.load(in_ptr0 + (r2 + (8*x0) + (256*r3) + (262144*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tl.store(out_ptr0 + (x4), tmp2, xmask) tl.store(out_ptr1 + (x4), tmp3, xmask) tmp5 = 8192.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/mt/cmtg2zsnl4olc5sxsmbvfg2di4exdy56jubcadkq7sn6qdlelfmk.py # Topologically Sorted Source Nodes: [input_2, input_3], Original ATen: [aten.native_group_norm, aten.relu] # Source node to ATen node mapping: # input_2 => add_2, mul_1 # input_3 => relu # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_2,), kwargs = {}) triton_poi_fused_native_group_norm_relu_7 = async_compile.triton('triton_poi_fused_native_group_norm_relu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_relu_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 256 x2 = (xindex // 262144) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 8)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 8)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 8192.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ud/cudj76u3b4mzlnpi4o4vnk6dkohedv7mmzojvnpfo7zztl2hyfir.py # Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # input_4 => getitem_4, getitem_5 # Graph fragment: # %getitem_4 : [num_users=3] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_8 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 230400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 256 x1 = (xindex // 256) % 15 x2 = (xindex // 3840) % 15 x3 = (xindex // 57600) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask) tmp1 = tl.load(in_ptr0 + (256 + x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask) tmp3 = tl.load(in_ptr0 + (512 + x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask) tmp5 = tl.load(in_ptr0 + (8192 + x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask) tmp7 = tl.load(in_ptr0 + (8448 + x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask) tmp9 = tl.load(in_ptr0 + (8704 + x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask) tmp11 = tl.load(in_ptr0 + (16384 + x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask) tmp13 = tl.load(in_ptr0 + (16640 + x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask) tmp15 = tl.load(in_ptr0 + (16896 + x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp17 = tmp1 > tmp0 tmp18 = tl.full([1], 1, tl.int8) tmp19 = tl.full([1], 0, tl.int8) tmp20 = tl.where(tmp17, tmp18, tmp19) tmp21 = tmp3 > tmp2 tmp22 = tl.full([1], 2, tl.int8) tmp23 = tl.where(tmp21, tmp22, tmp20) tmp24 = tmp5 > tmp4 tmp25 = tl.full([1], 3, tl.int8) tmp26 = tl.where(tmp24, tmp25, tmp23) tmp27 = tmp7 > tmp6 tmp28 = tl.full([1], 4, tl.int8) tmp29 = tl.where(tmp27, tmp28, tmp26) tmp30 = tmp9 > tmp8 tmp31 = tl.full([1], 5, tl.int8) tmp32 = tl.where(tmp30, tmp31, tmp29) tmp33 = tmp11 > tmp10 tmp34 = tl.full([1], 6, tl.int8) tmp35 = tl.where(tmp33, tmp34, tmp32) tmp36 = tmp13 > tmp12 tmp37 = tl.full([1], 7, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tmp39 = tmp15 > tmp14 tmp40 = tl.full([1], 8, tl.int8) tmp41 = tl.where(tmp39, tmp40, tmp38) tl.store(out_ptr0 + (x4), tmp16, xmask) tl.store(out_ptr1 + (x4), tmp41, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/cs/ccsgujn67tem25z4snogd4rju56f7asf2z2xc2ql3y3igzsnjscl.py # Topologically Sorted Source Nodes: [var_mean_1, sub_1, add_1, sqrt_1, w_1], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_1 => add_3 # sqrt_1 => sqrt_1 # sub_1 => sub_2 # var_mean_1 => var_mean_2 # w_1 => div_1 # Graph fragment: # %var_mean_2 : [num_users=2] = 
call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_5, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_5, %getitem_7), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {}) # %sqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_3,), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, %sqrt_1), kwargs = {}) triton_per_fused_add_div_sqrt_sub_var_mean_9 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1024, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_9(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): xnumel = 1024 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (256*x0)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 256.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp18, None) tl.store(out_ptr1 + (r1 + (256*x0)), tmp20, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/i4/ci4rwmv2id7wstvj7qm5un3hrgdcamj4q62ufrbb6n5adcpimmkt.py # Topologically Sorted Source Nodes: [residual_1], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # residual_1 => add_4, rsqrt_1, var_mean_3 # 
Graph fragment: # %var_mean_3 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) triton_per_fused_native_group_norm_10 = async_compile.triton('triton_per_fused_native_group_norm_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4096, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_10(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4096 rnumel = 225 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r2 = rindex x0 = xindex % 1024 x1 = (xindex // 1024) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (1024*r2) + (230400*x1)), rmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 225, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(rmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 225.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tl.store(out_ptr2 + (x3), tmp21, None) tl.store(out_ptr0 + (x3), tmp10, None) tl.store(out_ptr1 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/6r/c6rfrerv7panozxnzp73m2thxx6l2lqq7tw25ie4puri4uh6kx7l.py # Topologically Sorted Source Nodes: [var_mean_2, sub_2, add_2, sqrt_2, w_2], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_2 => add_6 # sqrt_2 => sqrt_2 # sub_2 => sub_4 # var_mean_2 => 
var_mean_4 # w_2 => div_2 # Graph fragment: # %var_mean_4 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_8, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_8, %getitem_11), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_10, 1e-05), kwargs = {}) # %sqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_6,), kwargs = {}) # %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_4, %sqrt_2), kwargs = {}) triton_per_fused_add_div_sqrt_sub_var_mean_11 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[256, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_11(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): xnumel = 256 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (256*x0)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 256.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp18, None) tl.store(out_ptr1 + (r1 + (256*x0)), tmp20, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/2o/c2okwb5r2fv3rodljvytgs3j26pe6epbaa6a53sllayucfr3sexp.py # Topologically Sorted Source Nodes: [group_norm_2], Original ATen: [aten.native_group_norm] 
# Source node to ATen node mapping: # group_norm_2 => add_7, rsqrt_2, var_mean_5 # Graph fragment: # %var_mean_5 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_4, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_12, 1e-06), kwargs = {}) # %rsqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_7,), kwargs = {}) triton_red_fused_native_group_norm_12 = async_compile.triton('triton_red_fused_native_group_norm_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[128, 2048], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_native_group_norm_12(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 128 rnumel = 1800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = (xindex // 32) tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 8 r3 = (rindex // 8) tmp0 = tl.load(in_ptr0 + (r2 + (8*x0) + (256*r3) + (57600*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tl.store(out_ptr0 + (x4), tmp2, xmask) tl.store(out_ptr1 + (x4), tmp3, xmask) tmp5 = 1800.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + (x4), 
tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/qp/cqp4gk4pz5rl3pprhmap3ohrhccllkfmmgueyb4fdcjy6ftemrgu.py # Topologically Sorted Source Nodes: [group_norm_2, y], Original ATen: [aten.native_group_norm, aten.relu] # Source node to ATen node mapping: # group_norm_2 => add_8, mul_5 # y => relu_1 # Graph fragment: # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, %unsqueeze_17), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %unsqueeze_14), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_8,), kwargs = {}) triton_poi_fused_native_group_norm_relu_13 = async_compile.triton('triton_poi_fused_native_group_norm_relu_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_relu_13(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 230400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 256 x2 = (xindex // 57600) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 8)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 8)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 1800.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + (x3), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/qr/cqrfsf7fhthcojobqb7bkj6vrlnl6imcqg2kuotqyor4lyj3q3ua.py # Topologically Sorted Source Nodes: [var_mean_3, sub_3, add_3, sqrt_3, w_3], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_3 
=> add_9 # sqrt_3 => sqrt_3 # sub_3 => sub_6 # var_mean_3 => var_mean_6 # w_3 => div_3 # Graph fragment: # %var_mean_6 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_11, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_11, %getitem_15), kwargs = {}) # %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_14, 1e-05), kwargs = {}) # %sqrt_3 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_9,), kwargs = {}) # %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_6, %sqrt_3), kwargs = {}) triton_red_fused_add_div_sqrt_sub_var_mean_14 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[256, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_14(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 256 rnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (2304*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tmp5 = 
2304.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (2304*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + (2304*x0)), tmp12, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/up/cupdc7hkcg7hqukyfzftk6ra3ygb42kffogwk2nvfxxzpndmpuhj.py # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # y_2 => add_13, rsqrt_4, var_mean_9 # Graph fragment: # %var_mean_9 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_8, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_20, 1e-06), kwargs = {}) # %rsqrt_4 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_13,), kwargs = {}) triton_red_fused_native_group_norm_15 = async_compile.triton('triton_red_fused_native_group_norm_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[128, 8192], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_native_group_norm_15(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 128 rnumel = 7200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = (xindex // 32) tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 32 r3 = (rindex // 32) tmp0 = tl.load(in_ptr0 + (r2 + (32*x0) + (1024*r3) + (230400*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, 
tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tl.store(out_ptr0 + (x4), tmp2, xmask) tl.store(out_ptr1 + (x4), tmp3, xmask) tmp5 = 7200.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/je/cjewebrstwucuhlzbdwqnnheuneff3dtsebypna3bj4ockphn64e.py # Topologically Sorted Source Nodes: [residual_1, y_2, add_5, y_3], Original ATen: [aten.native_group_norm, aten.add, aten.relu] # Source node to ATen node mapping: # add_5 => add_15 # residual_1 => add_5, mul_3 # y_2 => add_14, mul_9 # y_3 => relu_3 # Graph fragment: # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %unsqueeze_11), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %unsqueeze_8), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_9, %unsqueeze_29), kwargs = {}) # %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_9, %unsqueeze_26), kwargs = {}) # %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %add_14), kwargs = {}) # %relu_3 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_15,), kwargs = {}) triton_poi_fused_add_native_group_norm_relu_16 = async_compile.triton('triton_poi_fused_add_native_group_norm_relu_16', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_group_norm_relu_16', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_group_norm_relu_16(in_out_ptr0, 
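# This pointwise kernel fuses the tail of a pre-activation block: it
# normalizes the projected residual branch with per-channel statistics
# (225 spatial positions each, eps 1e-05), normalizes the main branch with
# per-group statistics (32 groups, 7200 = 32 * 225 elements each, eps 1e-06),
# applies each branch's per-channel scale/shift, adds the two branches, and
# takes a ReLU. A rough eager-mode equivalent (illustrative sketch only,
# with hypothetical norm modules norm_res / norm_main, not the generated
# code path):
#     y = torch.relu(norm_res(residual) + norm_main(main))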
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, xnumel, XBLOCK : tl.constexpr): xnumel = 921600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = (xindex // 230400) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x0 + (1024*x2)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + (1024*x2)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr5 + (x3), None) tmp15 = tl.load(in_ptr6 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last') tmp17 = tl.load(in_ptr7 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr8 + (x0), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr9 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 225.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp16 = tmp14 - tmp15 tmp18 = 7200.0 tmp19 = tmp17 / tmp18 tmp20 = 1e-06 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp16 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tmp13 + tmp27 tmp29 = tl.full([1], 0, tl.int32) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(in_out_ptr0 + (x3), tmp30, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ws/cwsgr4oguxqnced2rlthk64z5mojyyfimduuly37pnfms2qnmkhr.py # Topologically Sorted Source Nodes: [var_mean_5, sub_5, add_6, sqrt_5, w_5], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_6 => add_16 # sqrt_5 => sqrt_5 # sub_5 => sub_10 # var_mean_5 => var_mean_10 # w_5 => div_5 # Graph fragment: # %var_mean_10 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_17, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_17, %getitem_23), kwargs = {}) # %add_16 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_22, 1e-05), kwargs = {}) # %sqrt_5 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_16,), kwargs = {}) # %div_5 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_10, %sqrt_5), kwargs = {}) triton_per_fused_add_div_sqrt_sub_var_mean_17 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_17', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[256, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 
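# The persistent-reduction kernels named *_add_div_sqrt_sub_var_mean_*
# implement weight standardization: each of the 256 output filters is
# normalized over its 1024 fan-in values in a single block (RBLOCK equals
# rnumel, so no reduction loop is needed). A minimal eager-mode sketch of
# the same transform, assuming a 4-D conv weight `w` (illustrative, not the
# generated code path):
#     mean = w.mean(dim=(1, 2, 3), keepdim=True)
#     var = w.var(dim=(1, 2, 3), unbiased=False, keepdim=True)  # correction=0
#     w_std = (w - mean) / torch.sqrt(var + 1e-5)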
'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_17', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_17(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): xnumel = 256 XBLOCK: tl.constexpr = 1 rnumel = 1024 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (1024*x0)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp18, None) tl.store(out_ptr1 + (r1 + (1024*x0)), tmp20, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/3w/c3w3xmpevdbqtb4kcv5zbzzab67qqu6r7z4h6nykl4hyfmipsgmz.py # Topologically Sorted Source Nodes: [y_6, add_9, y_7], Original ATen: [aten.native_group_norm, aten.add, aten.relu] # Source node to ATen node mapping: # add_9 => add_25 # y_6 => add_24, mul_15 # y_7 => relu_6 # Graph fragment: # %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_15, %unsqueeze_47), kwargs = {}) # %add_24 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_15, %unsqueeze_44), kwargs = {}) # %add_25 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_3, %add_24), kwargs = {}) # %relu_6 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_25,), kwargs = {}) triton_poi_fused_add_native_group_norm_relu_18 = async_compile.triton('triton_poi_fused_add_native_group_norm_relu_18', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_poi_fused_add_native_group_norm_relu_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_group_norm_relu_18(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 921600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = (xindex // 230400) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x3), None) tmp2 = tl.load(in_ptr2 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = 7200.0 tmp6 = tmp4 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp3 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tmp15 = tmp0 + tmp14 tmp16 = tl.full([1], 0, tl.int32) tmp17 = triton_helpers.maximum(tmp16, tmp15) tl.store(out_ptr0 + (x3), tmp17, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/l6/cl6ds72qdtm7l4ji675aledkpyulcfuvi6mhfak5qfaas3ggpu2j.py # Topologically Sorted Source Nodes: [var_mean_14, sub_14, add_18, sqrt_14, w_14], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_18 => add_46 # sqrt_14 => sqrt_14 # sub_14 => sub_28 # var_mean_14 => var_mean_28 # w_14 => div_14 # Graph fragment: # %var_mean_28 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_44, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_28 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_44, %getitem_59), kwargs = {}) # %add_46 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_58, 1e-05), kwargs = {}) # %sqrt_14 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_46,), kwargs = {}) # %div_14 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_28, %sqrt_14), kwargs = {}) triton_per_fused_add_div_sqrt_sub_var_mean_19 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_19', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[2048, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_19', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_19(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): xnumel = 2048 XBLOCK: tl.constexpr = 1 rnumel = 1024 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (1024*x0)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp18, None) tl.store(out_ptr1 + (r1 + (1024*x0)), tmp20, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/pp/cpppktbeb533e6oo6ajdyeorq3qtp33lh7guwbxmdu2lwl7xvlo4.py # Topologically Sorted Source Nodes: [residual_3], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # residual_3 => add_47, rsqrt_14, var_mean_29 # Graph fragment: # %var_mean_29 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_28, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_47 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_60, 1e-05), kwargs = {}) # %rsqrt_14 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_47,), kwargs = {}) triton_per_fused_native_group_norm_20 = async_compile.triton('triton_per_fused_native_group_norm_20', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[8192, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 
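# This kernel computes the group-norm statistics for the residual_3 branch:
# mean and biased variance over 64 elements per row of the reshaped view
# (8192 rows = 4 batches * 2048), stored together with rsqrt(var + eps).
# Note that in this section the residual-path normalizations use
# eps = 1e-05 while the main-branch group norms use eps = 1e-06, matching
# the corresponding `add_*` graph fragments.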
'kernel_name': 'triton_per_fused_native_group_norm_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_20(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 8192 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 2048 x1 = (xindex // 2048) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (2048*r2) + (131072*x1)), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.full([XBLOCK, 1], 64, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp14 = 64.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + (x3), tmp18, None) tl.store(out_ptr0 + (x3), tmp8, None) tl.store(out_ptr1 + (x3), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/55/c55pe56n5m7o7fvilclfmmtuppi7moypaj2l37et3lqbcqxhcke4.py # Topologically Sorted Source Nodes: [var_mean_15, sub_15, add_19, sqrt_15, w_15], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_19 => add_49 # sqrt_15 => sqrt_15 # sub_15 => sub_30 # var_mean_15 => var_mean_30 # w_15 => div_15 # Graph fragment: # %var_mean_30 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_47, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_30 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_47, %getitem_63), kwargs = {}) # %add_49 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_62, 1e-05), kwargs = {}) # %sqrt_15 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_49,), kwargs = {}) # %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_30, %sqrt_15), kwargs = {}) triton_per_fused_add_div_sqrt_sub_var_mean_21 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_21', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[512, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_21', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_21(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): xnumel = 512 XBLOCK: tl.constexpr = 1 rnumel = 1024 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (1024*x0)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp18, None) tl.store(out_ptr1 + (r1 + (1024*x0)), tmp20, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/zw/czwvgvholvm2dmzatdm2xjy7pfihqfg5lgvzlbvvhvqp7li7ob46.py # Topologically Sorted Source Nodes: [group_norm_15], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # group_norm_15 => add_50, rsqrt_15, var_mean_31 # Graph fragment: # %var_mean_31 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_30, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_50 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_64, 1e-06), kwargs = {}) # %rsqrt_15 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_50,), kwargs = {}) triton_red_fused_native_group_norm_22 = async_compile.triton('triton_red_fused_native_group_norm_22', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[128, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_native_group_norm_22(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 128 rnumel = 3600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = (xindex // 32) tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 16 r3 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r2 + (16*x0) + (512*r3) + (115200*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tl.store(out_ptr0 + (x4), tmp2, xmask) tl.store(out_ptr1 + (x4), tmp3, xmask) tmp5 = 3600.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/gc/cgcrreey62yg5lcuoofqiebp66ujbbnzxqwegh5u5p3o3vgf6i5r.py # Topologically Sorted Source Nodes: [group_norm_15, y_16], Original ATen: [aten.native_group_norm, aten.relu] # Source node to ATen node mapping: # group_norm_15 => add_51, mul_31 # y_16 => relu_13 # Graph fragment: # %mul_31 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_31, %unsqueeze_95), kwargs = {}) # %add_51 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_31, %unsqueeze_92), kwargs = {}) # %relu_13 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_51,), kwargs = {}) triton_poi_fused_native_group_norm_relu_23 = async_compile.triton('triton_poi_fused_native_group_norm_relu_23', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', 
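# The pointwise kernel below finishes the group norm started by the Welford
# reduction above: (x - mean) * rsqrt(m2 / 3600 + 1e-6), then a per-channel
# scale and shift, then ReLU. Functionally the reduction/pointwise pair is
# close to this eager expression (illustrative sketch only, not the
# module's actual call path):
#     import torch.nn.functional as F
#     y = F.group_norm(x, num_groups=32, weight=g, bias=b, eps=1e-6).relu_()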
index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_relu_23(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 460800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 512 x2 = (xindex // 115200) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 16)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 16)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 3600.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/uj/cujbd72tdmi6fd474ibcyktkp5fgzbiskgva5yvr7hlmbbakbpq2.py # Topologically Sorted Source Nodes: [var_mean_16, sub_16, add_20, sqrt_16, w_16], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_20 => add_52 # sqrt_16 => sqrt_16 # sub_16 => sub_32 # var_mean_16 => var_mean_32 # w_16 => div_16 # Graph fragment: # %var_mean_32 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_50, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_32 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_50, %getitem_67), kwargs = {}) # %add_52 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_66, 1e-05), kwargs = {}) # %sqrt_16 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_52,), kwargs = {}) # %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_32, %sqrt_16), kwargs = {}) triton_red_fused_add_div_sqrt_sub_var_mean_24 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_24', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[512, 8192], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: 
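# For filters whose fan-in exceeds one block (here 4608 = 512 * 3 * 3),
# weight standardization runs in two passes: a looped Welford reduction
# first accumulates mean and M2 and stores sqrt(var + 1e-5) in place via
# in_out_ptr0, then a second loop over the same rows writes
# (w - mean) / std. The first pass loads with eviction_policy='evict_last'
# and the second with 'evict_first', so each row tends to stay in cache
# between the two passes.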
'*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_24', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_24(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 512 rnumel = 4608 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (4608*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tmp5 = 4608.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (4608*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + (4608*x0)), tmp12, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/rk/crkdcrvwrokjjpoveucqlscwu7eep65y2aqz3dpvq7zs6vvdtu5r.py # Topologically Sorted Source Nodes: [group_norm_16], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # group_norm_16 => add_53, rsqrt_16, var_mean_33 # Graph fragment: # %var_mean_33 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_32, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_53 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_68, 1e-06), kwargs = {}) # %rsqrt_16 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_53,), kwargs = {}) triton_per_fused_native_group_norm_25 = async_compile.triton('triton_per_fused_native_group_norm_25', ''' import triton import triton.language as tl from triton.compiler.compiler import 
AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[128, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_25', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_25(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): xnumel = 128 XBLOCK: tl.constexpr = 1 rnumel = 1024 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r2 = rindex % 16 r3 = (rindex // 16) x0 = xindex % 32 x1 = (xindex // 32) x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + (16*x0) + (512*r3) + (32768*x1)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-06 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + (x4), tmp18, None) tl.store(out_ptr0 + (x4), tmp8, None) tl.store(out_ptr1 + (x4), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ph/cphsfhc7dxfhcojeosic3xscyzjhc5v5fx4hwku7f4i472zyxkod.py # Topologically Sorted Source Nodes: [group_norm_16, y_17], Original ATen: [aten.native_group_norm, aten.relu] # Source node to ATen node mapping: # group_norm_16 => add_54, mul_33 # y_17 => relu_14 # Graph fragment: # %mul_33 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_33, %unsqueeze_101), kwargs = {}) # %add_54 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_33, %unsqueeze_98), kwargs = {}) # %relu_14 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_54,), kwargs = {}) triton_poi_fused_native_group_norm_relu_26 = async_compile.triton('triton_poi_fused_native_group_norm_relu_26', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import 
libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_26', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_relu_26(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 131072 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 512 x2 = (xindex // 32768) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 16)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 16)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 1024.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/f4/cf4ln4u2zxiclbcujh7hsnyl2rhqytq6jzwdj2mwxwf76y6lwsjy.py # Topologically Sorted Source Nodes: [var_mean_17, sub_17, add_21, sqrt_17, w_17], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_21 => add_55 # sqrt_17 => sqrt_17 # sub_17 => sub_34 # var_mean_17 => var_mean_34 # w_17 => div_17 # Graph fragment: # %var_mean_34 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_53, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_34 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_53, %getitem_71), kwargs = {}) # %add_55 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_70, 1e-05), kwargs = {}) # %sqrt_17 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_55,), kwargs = {}) # %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_34, %sqrt_17), kwargs = {}) triton_per_fused_add_div_sqrt_sub_var_mean_27 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_27', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import 
triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[2048, 512], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_27', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_27(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): xnumel = 2048 XBLOCK: tl.constexpr = 1 rnumel = 512 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (512*x0)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 512, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 512.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp18, None) tl.store(out_ptr1 + (r1 + (512*x0)), tmp20, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/kk/ckknskhbsc7jkzzxthvydg4rasmmdpn2bxkazlsjv5zudgrd6icc.py # Topologically Sorted Source Nodes: [y_18], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # y_18 => add_56, rsqrt_17, var_mean_35 # Graph fragment: # %var_mean_35 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_34, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_56 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_72, 1e-06), kwargs = {}) # %rsqrt_17 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_56,), kwargs = {}) triton_red_fused_native_group_norm_28 = async_compile.triton('triton_red_fused_native_group_norm_28', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[128, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_28', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_native_group_norm_28(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 128 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = (xindex // 32) tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 64 r3 = (rindex // 64) tmp0 = tl.load(in_ptr0 + (r2 + (64*x0) + (2048*r3) + (131072*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tl.store(out_ptr0 + (x4), tmp2, xmask) tl.store(out_ptr1 + (x4), tmp3, xmask) tmp5 = 4096.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/7z/c7z6s6fdyi6jzki2yiean6hh52igmt6vofwvvfecwv2clh3kwesc.py # Topologically Sorted Source Nodes: [residual_3, y_18, add_22, y_19], Original ATen: [aten.native_group_norm, aten.add, aten.relu] # Source node to ATen node mapping: # add_22 => add_58 # residual_3 => add_48, mul_29 # y_18 => add_57, mul_35 # y_19 => relu_15 # Graph fragment: # %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_29, %unsqueeze_89), kwargs = {}) # %add_48 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_29, %unsqueeze_86), kwargs = {}) # %mul_35 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_35, %unsqueeze_107), kwargs = {}) # %add_57 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_35, %unsqueeze_104), kwargs = 
{}) # %add_58 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_48, %add_57), kwargs = {}) # %relu_15 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_58,), kwargs = {}) triton_poi_fused_add_native_group_norm_relu_29 = async_compile.triton('triton_poi_fused_add_native_group_norm_relu_29', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_group_norm_relu_29', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_group_norm_relu_29(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 2048 x2 = (xindex // 131072) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x0 + (2048*x2)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + (2048*x2)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr5 + (x3), None) tmp15 = tl.load(in_ptr6 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last') tmp17 = tl.load(in_ptr7 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr8 + (x0), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr9 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 64.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp16 = tmp14 - tmp15 tmp18 = 4096.0 tmp19 = tmp17 / tmp18 tmp20 = 1e-06 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp16 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tmp13 + tmp27 tmp29 = tl.full([1], 0, tl.int32) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(in_out_ptr0 + (x3), tmp30, None) ''', device_str='cuda') # kernel path: 
runs/run_shard_2/inductor_cache/yx/cyxlt45o524ykzngatdfdgjgbsts47db35xlpi6k3o3slrrtk2uy.py # Topologically Sorted Source Nodes: [var_mean_18, sub_18, add_23, sqrt_18, w_18], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_23 => add_59 # sqrt_18 => sqrt_18 # sub_18 => sub_36 # var_mean_18 => var_mean_36 # w_18 => div_18 # Graph fragment: # %var_mean_36 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_56, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_36 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_56, %getitem_75), kwargs = {}) # %add_59 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_74, 1e-05), kwargs = {}) # %sqrt_18 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_59,), kwargs = {}) # %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_36, %sqrt_18), kwargs = {}) triton_red_fused_add_div_sqrt_sub_var_mean_30 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_30', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[512, 2048], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_30', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_30(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 512 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, 
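# `welford_reduce` is the streaming update step of Welford's algorithm: it
# maintains a running mean and M2 (sum of squared deviations) so the
# variance never needs a separate second pass over the data. Per element,
# roughly:
#     n    += 1
#     delta = x - mean;  mean += delta / n
#     M2   += delta * (x - mean)
# The follow-up `welford` call then merges the per-lane partial
# (mean, M2, weight) triples into a single statistic for the row.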
tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + (2048*x0)), tmp12, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/cp/ccp7mjnc5yh3rylb2avn6m4y6vcmeakpsyjlgqxu232ibnflcsim.py # Topologically Sorted Source Nodes: [y_22, add_26, y_23], Original ATen: [aten.native_group_norm, aten.add, aten.relu] # Source node to ATen node mapping: # add_26 => add_68 # y_22 => add_67, mul_41 # y_23 => relu_18 # Graph fragment: # %mul_41 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_41, %unsqueeze_125), kwargs = {}) # %add_67 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_41, %unsqueeze_122), kwargs = {}) # %add_68 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_15, %add_67), kwargs = {}) # %relu_18 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_68,), kwargs = {}) triton_poi_fused_add_native_group_norm_relu_31 = async_compile.triton('triton_poi_fused_add_native_group_norm_relu_31', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_group_norm_relu_31', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_group_norm_relu_31(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 2048 x2 = (xindex // 131072) 
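# Index decomposition for this fused group-norm + skip-add + ReLU kernel:
# x0 = channel (2048 channels), x2 = batch (131072 = 2048 channels * 64
# spatial positions per image), and x0 // 64 selects one of the 32 groups
# (2048 / 32 = 64 channels per group) when reading the per-(batch, group)
# mean and variance produced by the reduction kernel above.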
tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x3), None) tmp2 = tl.load(in_ptr2 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = 4096.0 tmp6 = tmp4 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp3 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tmp15 = tmp0 + tmp14 tmp16 = tl.full([1], 0, tl.int32) tmp17 = triton_helpers.maximum(tmp16, tmp15) tl.store(out_ptr0 + (x3), tmp17, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/uf/cufh65ntg4w7fj7sskax65hh4imsxuvhdnz5hex3dfxjpquetqcr.py # Topologically Sorted Source Nodes: [var_mean_27, sub_27, add_35, sqrt_27, w_27], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_35 => add_89 # sqrt_27 => sqrt_27 # sub_27 => sub_54 # var_mean_27 => var_mean_54 # w_27 => div_27 # Graph fragment: # %var_mean_54 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_83, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_54 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_83, %getitem_111), kwargs = {}) # %add_89 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_110, 1e-05), kwargs = {}) # %sqrt_27 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_89,), kwargs = {}) # %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_54, %sqrt_27), kwargs = {}) triton_red_fused_add_div_sqrt_sub_var_mean_32 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_32', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[4096, 2048], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_32', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_32(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 4096 rnumel = 2048 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, None) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + (2048*x0)), tmp12, rmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ya/cya4x3inshwssrzlwxiwnce5kohgufrirvvyp54r2iouxbaayu5s.py # Topologically Sorted Source Nodes: [residual_5], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # residual_5 => add_90, rsqrt_27, var_mean_55 # Graph fragment: # %var_mean_55 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_54, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_90 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_112, 1e-05), kwargs = {}) # %rsqrt_27 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_90,), kwargs = {}) triton_per_fused_native_group_norm_33 = async_compile.triton('triton_per_fused_native_group_norm_33', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16384, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_33', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_33(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16384 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 4096 x1 = (xindex // 4096) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4096*r2) + (65536*x1)), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.full([XBLOCK, 1], 16, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp14 = 16.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + (x3), tmp18, None) tl.store(out_ptr0 + (x3), tmp8, None) tl.store(out_ptr1 + (x3), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/5t/c5tpnhfsnpcigrj7z5npj3yzzqiceaoh5txeov37a57iml67m6zh.py # Topologically Sorted Source Nodes: [var_mean_28, sub_28, add_36, sqrt_28, w_28], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_36 => add_92 # sqrt_28 => sqrt_28 # sub_28 => sub_56 # var_mean_28 => var_mean_56 # w_28 => div_28 # Graph fragment: # %var_mean_56 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_86, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_56 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_86, %getitem_115), kwargs = {}) # %add_92 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_114, 1e-05), kwargs = {}) # %sqrt_28 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_92,), kwargs = {}) # %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_56, %sqrt_28), kwargs = {}) triton_red_fused_add_div_sqrt_sub_var_mean_34 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_34', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[1024, 2048], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_34', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 
'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_34(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 1024 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + (2048*x0)), tmp12, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/xu/cxu7hzsn7wpsi5zfniuopxptxyhj3flzb6yx4pgz6zlkaxzxlowj.py # Topologically Sorted Source Nodes: [group_norm_28], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # group_norm_28 => add_93, rsqrt_28, var_mean_57 # Graph fragment: # %var_mean_57 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_56, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_93 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_116, 1e-06), kwargs = {}) # %rsqrt_28 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_93,), kwargs = {}) triton_red_fused_native_group_norm_35 = async_compile.triton('triton_red_fused_native_group_norm_35', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[128, 2048], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': 
DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_35', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_native_group_norm_35(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 128 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = (xindex // 32) tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 32 r3 = (rindex // 32) tmp0 = tl.load(in_ptr0 + (r2 + (32*x0) + (1024*r3) + (65536*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tl.store(out_ptr0 + (x4), tmp2, xmask) tl.store(out_ptr1 + (x4), tmp3, xmask) tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/n6/cn6qna6a5zcefniteffvenzrplgozrkvtfrmxyptnokip3eazoqi.py # Topologically Sorted Source Nodes: [group_norm_28, y_32], Original ATen: [aten.native_group_norm, aten.relu] # Source node to ATen node mapping: # group_norm_28 => add_94, mul_57 # y_32 => relu_25 # Graph fragment: # %mul_57 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_57, %unsqueeze_173), kwargs = {}) # %add_94 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_57, %unsqueeze_170), kwargs = {}) # %relu_25 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_94,), kwargs = {}) triton_poi_fused_native_group_norm_relu_36 = async_compile.triton('triton_poi_fused_native_group_norm_relu_36', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, 
instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_36', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_relu_36(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = (xindex // 65536) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 2048.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/fz/cfzvfdwmzvehaoxqcmtmwxpejjg5f7sigjjg5u5l7pksufqevebu.py # Topologically Sorted Source Nodes: [var_mean_29, sub_29, add_37, sqrt_29, w_29], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_37 => add_95 # sqrt_29 => sqrt_29 # sub_29 => sub_58 # var_mean_29 => var_mean_58 # w_29 => div_29 # Graph fragment: # %var_mean_58 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_89, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_58 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_89, %getitem_119), kwargs = {}) # %add_95 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_118, 1e-05), kwargs = {}) # %sqrt_29 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_95,), kwargs = {}) # %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_58, %sqrt_29), kwargs = {}) triton_red_fused_add_div_sqrt_sub_var_mean_37 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_37', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math 
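# This reduction standardizes one conv filter per program: pass 1 runs a
# Welford accumulation over the filter's 9216 weights (1024*3*3), pass 2
# rewrites them as (w - mean) / sqrt(var + 1e-05), matching the var_mean /
# sub / add / sqrt / div graph fragment above. Roughly equivalent PyTorch
# (hypothetical sketch, not executed here):
#     v, m = torch.var_mean(w, dim=(1, 2, 3), correction=0, keepdim=True)
#     w_std = (w - m) / torch.sqrt(v + 1e-05)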
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[1024, 16384], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_37', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_37(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 1024 rnumel = 9216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (9216*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tmp5 = 9216.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (9216*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + (9216*x0)), tmp12, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/zi/czijqpshhstwtgeyhgjnqwe3ltsbnlcf5v7mfuhqpof6rqbmxdba.py # Topologically Sorted Source Nodes: [group_norm_29], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # group_norm_29 => add_96, rsqrt_29, var_mean_59 # Graph fragment: # %var_mean_59 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_58, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_96 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_120, 1e-06), kwargs = {}) # %rsqrt_29 : [num_users=2] = 
call_function[target=torch.ops.aten.rsqrt.default](args = (%add_96,), kwargs = {}) triton_per_fused_native_group_norm_38 = async_compile.triton('triton_per_fused_native_group_norm_38', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[128, 512], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_38', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_38(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): xnumel = 128 XBLOCK: tl.constexpr = 1 rnumel = 512 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r2 = rindex % 32 r3 = (rindex // 32) x0 = xindex % 32 x1 = (xindex // 32) x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + (32*x0) + (1024*r3) + (16384*x1)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 512, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 512.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-06 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + (x4), tmp18, None) tl.store(out_ptr0 + (x4), tmp8, None) tl.store(out_ptr1 + (x4), tmp13, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/lu/clu7bfw6fbczlr6qonp3ndodtuhzlp27gktlq4ssr7p7qxa5q6hi.py # Topologically Sorted Source Nodes: [group_norm_29, y_33], Original ATen: [aten.native_group_norm, aten.relu] # Source node to ATen node mapping: # group_norm_29 => add_97, mul_59 # y_33 => relu_26 # Graph fragment: # %mul_59 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_59, %unsqueeze_179), kwargs = {}) # %add_97 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_59, %unsqueeze_176), kwargs = {}) # %relu_26 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_97,), kwargs = {}) triton_poi_fused_native_group_norm_relu_39 = 
async_compile.triton('triton_poi_fused_native_group_norm_relu_39', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_39', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_relu_39(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = (xindex // 16384) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 512.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/jp/cjpsly7wijbnbz4sn4c6j4meq243hk4betvsb3xw5ulcn6k2kgbj.py # Topologically Sorted Source Nodes: [var_mean_30, sub_30, add_38, sqrt_30, w_30], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_38 => add_98 # sqrt_30 => sqrt_30 # sub_30 => sub_60 # var_mean_30 => var_mean_60 # w_30 => div_30 # Graph fragment: # %var_mean_60 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_92, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_60 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_92, %getitem_123), kwargs = {}) # %add_98 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_122, 1e-05), kwargs = {}) # %sqrt_30 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_98,), kwargs = {}) # %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_60, 
%sqrt_30), kwargs = {}) triton_per_fused_add_div_sqrt_sub_var_mean_40 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_40', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4096, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_40', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_40(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): xnumel = 4096 XBLOCK: tl.constexpr = 1 rnumel = 1024 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (1024*x0)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp18, None) tl.store(out_ptr1 + (r1 + (1024*x0)), tmp20, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/xk/cxky7odmklpyu27mloewsx7mitbjlayrh3ine4inkk44gtr2ch6i.py # Topologically Sorted Source Nodes: [y_34], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # y_34 => add_99, rsqrt_30, var_mean_61 # Graph fragment: # %var_mean_61 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_60, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_99 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_124, 1e-06), kwargs = {}) # %rsqrt_30 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_99,), kwargs = {}) triton_red_fused_native_group_norm_41 = async_compile.triton('triton_red_fused_native_group_norm_41', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[128, 2048], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_41', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_native_group_norm_41(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 128 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = (xindex // 32) tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 128 r3 = (rindex // 128) tmp0 = tl.load(in_ptr0 + (r2 + (128*x0) + (4096*r3) + (65536*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tl.store(out_ptr0 + (x4), tmp2, xmask) tl.store(out_ptr1 + (x4), tmp3, xmask) tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ir/cirapbyv5relllyqwlvanqnnjaovfqje3b55aqltsehy63agad4t.py # Topologically Sorted Source Nodes: [residual_5, y_34, add_39, y_35], Original ATen: [aten.native_group_norm, aten.add, aten.relu] # Source node to ATen node mapping: # add_39 => add_101 # residual_5 => add_91, mul_55 # y_34 => add_100, mul_61 # y_35 => relu_27 # Graph fragment: # %mul_55 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_55, %unsqueeze_167), kwargs = {}) # %add_91 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = 
(%mul_55, %unsqueeze_164), kwargs = {}) # %mul_61 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_61, %unsqueeze_185), kwargs = {}) # %add_100 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_61, %unsqueeze_182), kwargs = {}) # %add_101 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_91, %add_100), kwargs = {}) # %relu_27 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_101,), kwargs = {}) triton_poi_fused_add_native_group_norm_relu_42 = async_compile.triton('triton_poi_fused_add_native_group_norm_relu_42', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_group_norm_relu_42', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_group_norm_relu_42(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 4096 x2 = (xindex // 65536) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x0 + (4096*x2)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + (4096*x2)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr5 + (x3), None) tmp15 = tl.load(in_ptr6 + ((32*x2) + (x0 // 128)), None, eviction_policy='evict_last') tmp17 = tl.load(in_ptr7 + ((32*x2) + (x0 // 128)), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr8 + (x0), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr9 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 16.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp16 = tmp14 - tmp15 tmp18 = 2048.0 tmp19 = tmp17 / tmp18 tmp20 = 1e-06 tmp21 = tmp19 + tmp20 tmp22 = 
libdevice.rsqrt(tmp21) tmp23 = tmp16 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tmp13 + tmp27 tmp29 = tl.full([1], 0, tl.int32) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(in_out_ptr0 + (x3), tmp30, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ue/cueb6z6jew6lkkjx2uh4cdajr6ku5ig4w55b7kwo6wdkhujwh4k3.py # Topologically Sorted Source Nodes: [var_mean_31, sub_31, add_40, sqrt_31, w_31], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add_40 => add_102 # sqrt_31 => sqrt_31 # sub_31 => sub_62 # var_mean_31 => var_mean_62 # w_31 => div_31 # Graph fragment: # %var_mean_62 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_95, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %sub_62 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_95, %getitem_127), kwargs = {}) # %add_102 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_126, 1e-05), kwargs = {}) # %sqrt_31 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_102,), kwargs = {}) # %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_62, %sqrt_31), kwargs = {}) triton_red_fused_add_div_sqrt_sub_var_mean_43 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_43', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[1024, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_43', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_43(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 1024 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (4096*x0)), rmask & xmask, 
eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce( tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0 ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford( tmp2_mean, tmp2_m2, tmp2_weight, 1 ) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4 = tmp4_tmp[:, None] tmp5 = 4096.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (4096*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + (4096*x0)), tmp12, rmask & xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/mi/cmijm77mwyric7y5t6cn3vfjud3morw7ct5qg77vxwthjlfq47yn.py # Topologically Sorted Source Nodes: [y_38, add_43, y_39], Original ATen: [aten.native_group_norm, aten.add, aten.relu] # Source node to ATen node mapping: # add_43 => add_111 # y_38 => add_110, mul_67 # y_39 => relu_30 # Graph fragment: # %mul_67 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_67, %unsqueeze_203), kwargs = {}) # %add_110 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_67, %unsqueeze_200), kwargs = {}) # %add_111 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_27, %add_110), kwargs = {}) # %relu_30 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_111,), kwargs = {}) triton_poi_fused_add_native_group_norm_relu_44 = async_compile.triton('triton_poi_fused_add_native_group_norm_relu_44', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_group_norm_relu_44', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_add_native_group_norm_relu_44(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 4096 x2 = (xindex // 65536) tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x3), None) tmp2 = tl.load(in_ptr2 + ((32*x2) + (x0 // 128)), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + ((32*x2) + (x0 // 128)), None, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = 2048.0 tmp6 = tmp4 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp3 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tmp15 = tmp0 + tmp14 tmp16 = tl.full([1], 0, tl.int32) tmp17 = triton_helpers.maximum(tmp16, tmp15) tl.store(out_ptr0 + (x3), tmp17, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/z6/cz6rwsc56wzlwtrrsxzfvms2o3mmqsadufo3tifzlb33nhtkmabw.py # Topologically Sorted Source Nodes: [y_46, add_51, y_47], Original ATen: [aten.native_group_norm, aten.add, aten.relu] # Source node to ATen node mapping: # add_51 => add_131 # y_46 => add_130, mul_79 # y_47 => relu_36 # Graph fragment: # %mul_79 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_79, %unsqueeze_239), kwargs = {}) # %add_130 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_79, %unsqueeze_236), kwargs = {}) # %add_131 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_33, %add_130), kwargs = {}) # %relu_36 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_131,), kwargs = {}) triton_poi_fused_add_native_group_norm_relu_45 = async_compile.triton('triton_poi_fused_add_native_group_norm_relu_45', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4096], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_group_norm_relu_45', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_add_native_group_norm_relu_45(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y1 = (yindex // 16) y0 = yindex % 16 tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + ((32*y1) + (x2 // 128)), ymask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + ((32*y1) + (x2 // 128)), ymask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + (x2), None, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = 2048.0 tmp6 = tmp4 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp3 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tmp15 = tmp0 + tmp14 tmp16 = tl.full([1, 1], 0, tl.int32) tmp17 = triton_helpers.maximum(tmp16, tmp15) tl.store(out_ptr0 + (y0 + (16*x2) + (65536*y1)), tmp17, ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/gs/cgs5wanuanwzjve3cdq7txfmiy3ai2syofxngfik5233huybzdft.py # Topologically Sorted Source Nodes: [], Original ATen: [aten.threshold_backward] # Source node to ATen node mapping: # Graph fragment: # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_36, 0), kwargs = {}) triton_poi_fused_threshold_backward_46 = async_compile.triton('triton_poi_fused_threshold_backward_46', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_threshold_backward_46', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_threshold_backward_46(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16384 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4096 y1 = (yindex // 4096) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 <= tmp1 tl.store(out_ptr0 + (y0 + (4096*x2) + (65536*y1)), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121 = args args.clear() assert_size_stride(primals_1, (256, 3, 7, 7), (147, 49, 7, 1)) assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_3, (256, ), (1, )) assert_size_stride(primals_4, (256, ), (1, )) assert_size_stride(primals_5, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_6, (1024, ), (1, )) assert_size_stride(primals_7, (1024, ), (1, )) assert_size_stride(primals_8, (256, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_9, (256, ), (1, )) assert_size_stride(primals_10, (256, ), (1, )) assert_size_stride(primals_11, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_12, (256, ), (1, )) assert_size_stride(primals_13, (256, ), (1, )) assert_size_stride(primals_14, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_15, (1024, ), (1, )) assert_size_stride(primals_16, (1024, ), (1, )) assert_size_stride(primals_17, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_18, (256, ), (1, )) assert_size_stride(primals_19, (256, ), (1, )) assert_size_stride(primals_20, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_21, (256, ), (1, )) assert_size_stride(primals_22, (256, ), (1, )) assert_size_stride(primals_23, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_24, (1024, ), (1, )) assert_size_stride(primals_25, (1024, ), (1, )) assert_size_stride(primals_26, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_27, (256, ), (1, )) assert_size_stride(primals_28, (256, ), (1, )) assert_size_stride(primals_29, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_30, (256, ), (1, )) 
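    # Each assert_size_stride call pins an input to the exact (shape, stride)
    # layout the graph was traced with, so a mismatched tensor fails fast here
    # rather than letting a downstream kernel index out of bounds. Illustrative
    # check (hypothetical tensor, assuming the guard raises on mismatch):
    #     t = torch.empty_strided((256,), (1,))
    #     assert_size_stride(t, (256,), (1,))  # passes; a wrong stride raises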
assert_size_stride(primals_31, (256, ), (1, )) assert_size_stride(primals_32, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_33, (1024, ), (1, )) assert_size_stride(primals_34, (1024, ), (1, )) assert_size_stride(primals_35, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_36, (256, ), (1, )) assert_size_stride(primals_37, (256, ), (1, )) assert_size_stride(primals_38, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_39, (256, ), (1, )) assert_size_stride(primals_40, (256, ), (1, )) assert_size_stride(primals_41, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_42, (1024, ), (1, )) assert_size_stride(primals_43, (1024, ), (1, )) assert_size_stride(primals_44, (2048, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_45, (2048, ), (1, )) assert_size_stride(primals_46, (2048, ), (1, )) assert_size_stride(primals_47, (512, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_48, (512, ), (1, )) assert_size_stride(primals_49, (512, ), (1, )) assert_size_stride(primals_50, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_51, (512, ), (1, )) assert_size_stride(primals_52, (512, ), (1, )) assert_size_stride(primals_53, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_54, (2048, ), (1, )) assert_size_stride(primals_55, (2048, ), (1, )) assert_size_stride(primals_56, (512, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_57, (512, ), (1, )) assert_size_stride(primals_58, (512, ), (1, )) assert_size_stride(primals_59, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_60, (512, ), (1, )) assert_size_stride(primals_61, (512, ), (1, )) assert_size_stride(primals_62, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_63, (2048, ), (1, )) assert_size_stride(primals_64, (2048, ), (1, )) assert_size_stride(primals_65, (512, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_66, (512, ), (1, )) assert_size_stride(primals_67, (512, ), (1, )) assert_size_stride(primals_68, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_69, (512, ), (1, )) assert_size_stride(primals_70, (512, ), (1, )) assert_size_stride(primals_71, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_72, (2048, ), (1, )) assert_size_stride(primals_73, (2048, ), (1, )) assert_size_stride(primals_74, (512, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_75, (512, ), (1, )) assert_size_stride(primals_76, (512, ), (1, )) assert_size_stride(primals_77, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_78, (512, ), (1, )) assert_size_stride(primals_79, (512, ), (1, )) assert_size_stride(primals_80, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_81, (2048, ), (1, )) assert_size_stride(primals_82, (2048, ), (1, )) assert_size_stride(primals_83, (4096, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_84, (4096, ), (1, )) assert_size_stride(primals_85, (4096, ), (1, )) assert_size_stride(primals_86, (1024, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_87, (1024, ), (1, )) assert_size_stride(primals_88, (1024, ), (1, )) assert_size_stride(primals_89, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_90, (1024, ), (1, )) assert_size_stride(primals_91, (1024, ), (1, )) assert_size_stride(primals_92, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_93, (4096, ), (1, )) assert_size_stride(primals_94, (4096, ), (1, )) assert_size_stride(primals_95, (1024, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_96, (1024, ), 
(1, )) assert_size_stride(primals_97, (1024, ), (1, )) assert_size_stride(primals_98, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_99, (1024, ), (1, )) assert_size_stride(primals_100, (1024, ), (1, )) assert_size_stride(primals_101, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_102, (4096, ), (1, )) assert_size_stride(primals_103, (4096, ), (1, )) assert_size_stride(primals_104, (1024, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_105, (1024, ), (1, )) assert_size_stride(primals_106, (1024, ), (1, )) assert_size_stride(primals_107, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_108, (1024, ), (1, )) assert_size_stride(primals_109, (1024, ), (1, )) assert_size_stride(primals_110, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_111, (4096, ), (1, )) assert_size_stride(primals_112, (4096, ), (1, )) assert_size_stride(primals_113, (1024, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_114, (1024, ), (1, )) assert_size_stride(primals_115, (1024, ), (1, )) assert_size_stride(primals_116, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_117, (1024, ), (1, )) assert_size_stride(primals_118, (1024, ), (1, )) assert_size_stride(primals_119, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_120, (4096, ), (1, )) assert_size_stride(primals_121, (4096, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 768, 49, grid=grid(768, 49), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_2, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0) del primals_2 buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_11, buf2, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_11 buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_20, buf3, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_20 buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_29, buf4, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_29 buf5 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_38, buf5, 65536, 9, grid=grid(65536, 9), stream=stream0) del primals_38 buf6 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_50, buf6, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_50 buf7 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_59, buf7, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_59 buf8 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_68, buf8, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_68 buf9 = 
empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_77, buf9, 262144, 9, grid=grid(262144, 9), stream=stream0) del primals_77 buf10 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_89, buf10, 1048576, 9, grid=grid(1048576, 9), stream=stream0) del primals_89 buf11 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_98, buf11, 1048576, 9, grid=grid(1048576, 9), stream=stream0) del primals_98 buf12 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_107, buf12, 1048576, 9, grid=grid(1048576, 9), stream=stream0) del primals_107 buf13 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_4.run(primals_116, buf13, 1048576, 9, grid=grid(1048576, 9), stream=stream0) del primals_116 buf15 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf17 = reinterpret_tensor(buf15, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf15 # reuse buf18 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch.float32) # Topologically Sorted Source Nodes: [var_mean, sub, add, sqrt, w], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_5.run(buf17, buf0, buf18, 256, 147, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution] buf19 = extern_kernels.convolution(buf1, buf18, stride=(2, 2), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 256, 32, 32), (262144, 1, 8192, 256)) buf20 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf21 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf23 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_6.run(buf19, buf20, buf21, buf23, 128, 8192, grid=grid(128), stream=stream0) buf24 = empty_strided_cuda((4, 256, 32, 32), (262144, 1, 8192, 256), torch.float32) # Topologically Sorted Source Nodes: [input_2, input_3], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_7.run(buf19, buf20, buf21, primals_3, primals_4, buf24, 1048576, grid=grid(1048576), stream=stream0) del primals_4 buf25 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) buf26 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.int8) # Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_8.run(buf24, buf25, buf26, 230400, grid=grid(230400), stream=stream0) buf28 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf30 = reinterpret_tensor(buf28, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf28 # reuse buf31 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_1, sub_1, add_1, sqrt_1, w_1], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] 
triton_per_fused_add_div_sqrt_sub_var_mean_9.run(buf30, primals_5, buf31, 1024, 256, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [residual], Original ATen: [aten.convolution] buf32 = extern_kernels.convolution(buf25, buf31, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 1024, 15, 15), (230400, 1, 15360, 1024)) buf33 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096), torch.float32) buf34 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096), torch.float32) buf36 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096), torch.float32) # Topologically Sorted Source Nodes: [residual_1], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_10.run(buf32, buf33, buf34, buf36, 4096, 225, grid=grid(4096), stream=stream0) buf38 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf40 = reinterpret_tensor(buf38, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf38 # reuse buf41 = empty_strided_cuda((256, 256, 1, 1), (256, 1, 256, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_2, sub_2, add_2, sqrt_2, w_2], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_11.run(buf40, primals_8, buf41, 256, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf42 = extern_kernels.convolution(buf25, buf41, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf43 = buf21; del buf21 # reuse buf44 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf46 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_2], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_12.run(buf42, buf43, buf44, buf46, 128, 1800, grid=grid(128), stream=stream0) buf47 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) # Topologically Sorted Source Nodes: [group_norm_2, y], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_13.run(buf42, buf43, buf44, primals_9, primals_10, buf47, 230400, grid=grid(230400), stream=stream0) del primals_10 buf49 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf51 = reinterpret_tensor(buf49, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf49 # reuse buf52 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_3, sub_3, add_3, sqrt_3, w_3], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_14.run(buf51, buf2, buf52, 256, 2304, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution] buf53 = extern_kernels.convolution(buf47, buf52, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf53, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf54 = buf44; del buf44 # reuse buf55 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf57 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_3], Original ATen: [aten.native_group_norm] 
triton_red_fused_native_group_norm_12.run(buf53, buf54, buf55, buf57, 128, 1800, grid=grid(128), stream=stream0) buf58 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) # Topologically Sorted Source Nodes: [group_norm_3, y_1], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_13.run(buf53, buf54, buf55, primals_12, primals_13, buf58, 230400, grid=grid(230400), stream=stream0) del primals_13 buf60 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf62 = reinterpret_tensor(buf60, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf60 # reuse buf63 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_4, sub_4, add_4, sqrt_4, w_4], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_9.run(buf62, primals_14, buf63, 1024, 256, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution] buf64 = extern_kernels.convolution(buf58, buf63, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf64, (4, 1024, 15, 15), (230400, 1, 15360, 1024)) buf65 = buf55; del buf55 # reuse buf66 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf68 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_15.run(buf64, buf65, buf66, buf68, 128, 7200, grid=grid(128), stream=stream0) buf69 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360, 1024), torch.float32) buf70 = buf69; del buf69 # reuse # Topologically Sorted Source Nodes: [residual_1, y_2, add_5, y_3], Original ATen: [aten.native_group_norm, aten.add, aten.relu] triton_poi_fused_add_native_group_norm_relu_16.run(buf70, buf32, buf33, buf34, primals_6, primals_7, buf64, buf65, buf66, primals_15, primals_16, 921600, grid=grid(921600), stream=stream0) del primals_16 del primals_7 buf72 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf74 = reinterpret_tensor(buf72, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf72 # reuse buf75 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_5, sub_5, add_6, sqrt_5, w_5], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_17.run(buf74, primals_17, buf75, 256, 1024, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution] buf76 = extern_kernels.convolution(buf70, buf75, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf76, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf77 = buf66; del buf66 # reuse buf78 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf80 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_5], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_12.run(buf76, buf77, buf78, buf80, 128, 1800, grid=grid(128), stream=stream0) buf81 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) # Topologically Sorted Source Nodes: [group_norm_5, y_4], Original ATen: [aten.native_group_norm, aten.relu] 
triton_poi_fused_native_group_norm_relu_13.run(buf76, buf77, buf78, primals_18, primals_19, buf81, 230400, grid=grid(230400), stream=stream0) del primals_19 buf83 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf85 = reinterpret_tensor(buf83, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf83 # reuse buf86 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_6, sub_6, add_7, sqrt_6, w_6], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_14.run(buf85, buf3, buf86, 256, 2304, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution] buf87 = extern_kernels.convolution(buf81, buf86, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf87, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf88 = buf78; del buf78 # reuse buf89 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf91 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_6], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_12.run(buf87, buf88, buf89, buf91, 128, 1800, grid=grid(128), stream=stream0) buf92 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) # Topologically Sorted Source Nodes: [group_norm_6, y_5], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_13.run(buf87, buf88, buf89, primals_21, primals_22, buf92, 230400, grid=grid(230400), stream=stream0) del primals_22 buf94 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf96 = reinterpret_tensor(buf94, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf94 # reuse buf97 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_7, sub_7, add_8, sqrt_7, w_7], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_9.run(buf96, primals_23, buf97, 1024, 256, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution] buf98 = extern_kernels.convolution(buf92, buf97, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf98, (4, 1024, 15, 15), (230400, 1, 15360, 1024)) buf99 = buf89; del buf89 # reuse buf100 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf102 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [y_6], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_15.run(buf98, buf99, buf100, buf102, 128, 7200, grid=grid(128), stream=stream0) buf103 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360, 1024), torch.float32) # Topologically Sorted Source Nodes: [y_6, add_9, y_7], Original ATen: [aten.native_group_norm, aten.add, aten.relu] triton_poi_fused_add_native_group_norm_relu_18.run(buf70, buf98, buf99, buf100, primals_24, primals_25, buf103, 921600, grid=grid(921600), stream=stream0) del primals_25 buf105 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf107 = reinterpret_tensor(buf105, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf105 # reuse buf108 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024), 
torch.float32) # Topologically Sorted Source Nodes: [var_mean_8, sub_8, add_10, sqrt_8, w_8], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_17.run(buf107, primals_26, buf108, 256, 1024, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution] buf109 = extern_kernels.convolution(buf103, buf108, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf109, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf110 = buf100; del buf100 # reuse buf111 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf113 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_8], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_12.run(buf109, buf110, buf111, buf113, 128, 1800, grid=grid(128), stream=stream0) buf114 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) # Topologically Sorted Source Nodes: [group_norm_8, y_8], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_13.run(buf109, buf110, buf111, primals_27, primals_28, buf114, 230400, grid=grid(230400), stream=stream0) del primals_28 buf116 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf118 = reinterpret_tensor(buf116, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf116 # reuse buf119 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_9, sub_9, add_11, sqrt_9, w_9], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_14.run(buf118, buf4, buf119, 256, 2304, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution] buf120 = extern_kernels.convolution(buf114, buf119, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf120, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf121 = buf111; del buf111 # reuse buf122 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf124 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_9], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_12.run(buf120, buf121, buf122, buf124, 128, 1800, grid=grid(128), stream=stream0) buf125 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) # Topologically Sorted Source Nodes: [group_norm_9, y_9], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_13.run(buf120, buf121, buf122, primals_30, primals_31, buf125, 230400, grid=grid(230400), stream=stream0) del primals_31 buf127 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf129 = reinterpret_tensor(buf127, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf127 # reuse buf130 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_10, sub_10, add_12, sqrt_10, w_10], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_9.run(buf129, primals_32, buf130, 1024, 256, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution] 
buf131 = extern_kernels.convolution(buf125, buf130, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf131, (4, 1024, 15, 15), (230400, 1, 15360, 1024)) buf132 = buf122; del buf122 # reuse buf133 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf135 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [y_10], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_15.run(buf131, buf132, buf133, buf135, 128, 7200, grid=grid(128), stream=stream0) buf136 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360, 1024), torch.float32) # Topologically Sorted Source Nodes: [y_10, add_13, y_11], Original ATen: [aten.native_group_norm, aten.add, aten.relu] triton_poi_fused_add_native_group_norm_relu_18.run(buf103, buf131, buf132, buf133, primals_33, primals_34, buf136, 921600, grid=grid(921600), stream=stream0) del primals_34 buf138 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf140 = reinterpret_tensor(buf138, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf138 # reuse buf141 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_11, sub_11, add_14, sqrt_11, w_11], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_17.run(buf140, primals_35, buf141, 256, 1024, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution] buf142 = extern_kernels.convolution(buf136, buf141, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf142, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf143 = buf133; del buf133 # reuse buf144 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf146 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_11], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_12.run(buf142, buf143, buf144, buf146, 128, 1800, grid=grid(128), stream=stream0) buf147 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) # Topologically Sorted Source Nodes: [group_norm_11, y_12], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_13.run(buf142, buf143, buf144, primals_36, primals_37, buf147, 230400, grid=grid(230400), stream=stream0) del primals_37 buf149 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf151 = reinterpret_tensor(buf149, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf149 # reuse buf152 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_12, sub_12, add_15, sqrt_12, w_12], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_14.run(buf151, buf5, buf152, 256, 2304, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution] buf153 = extern_kernels.convolution(buf147, buf152, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf153, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf154 = buf144; del buf144 # reuse buf155 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) 
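    # NOTE (reader annotation): the (4, 32, 1, 1) float32 buffers allocated
    # around here hold per-(batch, group) mean / variance / rstd statistics for
    # GroupNorm with 32 groups, which the *_native_group_norm_relu_* kernels
    # then apply together with the affine parameters and a fused ReLU. Rough
    # eager-mode equivalent (names illustrative; eps mirrors the 1e-06 constant
    # visible in the fused kernel above and should be treated as an assumption
    # for the other blocks):
    #
    #     import torch.nn.functional as F
    #     y = F.relu(F.group_norm(x, 32, weight=gamma, bias=beta, eps=1e-06))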
buf157 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_12], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_12.run(buf153, buf154, buf155, buf157, 128, 1800, grid=grid(128), stream=stream0) buf158 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) # Topologically Sorted Source Nodes: [group_norm_12, y_13], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_13.run(buf153, buf154, buf155, primals_39, primals_40, buf158, 230400, grid=grid(230400), stream=stream0) del primals_40 buf160 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf162 = reinterpret_tensor(buf160, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf160 # reuse buf163 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) # Topologically Sorted Source Nodes: [var_mean_13, sub_13, add_16, sqrt_13, w_13], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_9.run(buf162, primals_41, buf163, 1024, 256, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution] buf164 = extern_kernels.convolution(buf158, buf163, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf164, (4, 1024, 15, 15), (230400, 1, 15360, 1024)) buf165 = buf155; del buf155 # reuse buf166 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf168 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [y_14], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_15.run(buf164, buf165, buf166, buf168, 128, 7200, grid=grid(128), stream=stream0) buf169 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360, 1024), torch.float32) # Topologically Sorted Source Nodes: [y_14, add_17, y_15], Original ATen: [aten.native_group_norm, aten.add, aten.relu] triton_poi_fused_add_native_group_norm_relu_18.run(buf136, buf164, buf165, buf166, primals_42, primals_43, buf169, 921600, grid=grid(921600), stream=stream0) del primals_43 buf171 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf173 = reinterpret_tensor(buf171, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf171 # reuse buf174 = empty_strided_cuda((2048, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_14, sub_14, add_18, sqrt_14, w_14], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_19.run(buf173, primals_44, buf174, 2048, 1024, grid=grid(2048), stream=stream0) # Topologically Sorted Source Nodes: [residual_2], Original ATen: [aten.convolution] buf175 = extern_kernels.convolution(buf169, buf174, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf175, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf176 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192), torch.float32) buf177 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192), torch.float32) buf179 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192), torch.float32) # Topologically Sorted Source Nodes: [residual_3], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_20.run(buf175, buf176, buf177, buf179, 8192, 64, 
grid=grid(8192), stream=stream0) buf181 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf183 = reinterpret_tensor(buf181, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf181 # reuse buf184 = empty_strided_cuda((512, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_15, sub_15, add_19, sqrt_15, w_15], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_21.run(buf183, primals_47, buf184, 512, 1024, grid=grid(512), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_15], Original ATen: [aten.convolution] buf185 = extern_kernels.convolution(buf169, buf184, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf185, (4, 512, 15, 15), (115200, 1, 7680, 512)) buf186 = buf166; del buf166 # reuse buf187 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf189 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_15], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_22.run(buf185, buf186, buf187, buf189, 128, 3600, grid=grid(128), stream=stream0) buf190 = empty_strided_cuda((4, 512, 15, 15), (115200, 1, 7680, 512), torch.float32) # Topologically Sorted Source Nodes: [group_norm_15, y_16], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_23.run(buf185, buf186, buf187, primals_48, primals_49, buf190, 460800, grid=grid(460800), stream=stream0) del primals_49 buf192 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf194 = reinterpret_tensor(buf192, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf192 # reuse buf195 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Topologically Sorted Source Nodes: [var_mean_16, sub_16, add_20, sqrt_16, w_16], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_24.run(buf194, buf6, buf195, 512, 4608, grid=grid(512), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_16], Original ATen: [aten.convolution] buf196 = extern_kernels.convolution(buf190, buf195, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf196, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf197 = buf187; del buf187 # reuse buf198 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf200 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_16], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_25.run(buf196, buf197, buf198, buf200, 128, 1024, grid=grid(128), stream=stream0) buf201 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) # Topologically Sorted Source Nodes: [group_norm_16, y_17], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_26.run(buf196, buf197, buf198, primals_51, primals_52, buf201, 131072, grid=grid(131072), stream=stream0) del primals_52 buf203 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf205 = reinterpret_tensor(buf203, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf203 # reuse buf206 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) # Topologically Sorted Source Nodes: [var_mean_17, sub_17, add_21, sqrt_17, 
w_17], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_27.run(buf205, primals_53, buf206, 2048, 512, grid=grid(2048), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_17], Original ATen: [aten.convolution] buf207 = extern_kernels.convolution(buf201, buf206, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf207, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf208 = buf198; del buf198 # reuse buf209 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf211 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [y_18], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_28.run(buf207, buf208, buf209, buf211, 128, 4096, grid=grid(128), stream=stream0) buf212 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) buf213 = buf212; del buf212 # reuse # Topologically Sorted Source Nodes: [residual_3, y_18, add_22, y_19], Original ATen: [aten.native_group_norm, aten.add, aten.relu] triton_poi_fused_add_native_group_norm_relu_29.run(buf213, buf175, buf176, buf177, primals_45, primals_46, buf207, buf208, buf209, primals_54, primals_55, 524288, grid=grid(524288), stream=stream0) del buf177 del primals_46 del primals_55 buf215 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf217 = reinterpret_tensor(buf215, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf215 # reuse buf218 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) # Topologically Sorted Source Nodes: [var_mean_18, sub_18, add_23, sqrt_18, w_18], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_30.run(buf217, primals_56, buf218, 512, 2048, grid=grid(512), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_18], Original ATen: [aten.convolution] buf219 = extern_kernels.convolution(buf213, buf218, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf219, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf220 = buf209; del buf209 # reuse buf221 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf223 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_18], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_25.run(buf219, buf220, buf221, buf223, 128, 1024, grid=grid(128), stream=stream0) buf224 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) # Topologically Sorted Source Nodes: [group_norm_18, y_20], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_26.run(buf219, buf220, buf221, primals_57, primals_58, buf224, 131072, grid=grid(131072), stream=stream0) del primals_58 buf226 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf228 = reinterpret_tensor(buf226, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf226 # reuse buf229 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Topologically Sorted Source Nodes: [var_mean_19, sub_19, add_24, sqrt_19, w_19], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_24.run(buf228, buf7, buf229, 512, 4608, grid=grid(512), stream=stream0) # Topologically Sorted Source 
Nodes: [conv2d_19], Original ATen: [aten.convolution] buf230 = extern_kernels.convolution(buf224, buf229, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf230, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf231 = buf221; del buf221 # reuse buf232 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf234 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_19], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_25.run(buf230, buf231, buf232, buf234, 128, 1024, grid=grid(128), stream=stream0) buf235 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) # Topologically Sorted Source Nodes: [group_norm_19, y_21], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_26.run(buf230, buf231, buf232, primals_60, primals_61, buf235, 131072, grid=grid(131072), stream=stream0) del primals_61 buf237 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf239 = reinterpret_tensor(buf237, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf237 # reuse buf240 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) # Topologically Sorted Source Nodes: [var_mean_20, sub_20, add_25, sqrt_20, w_20], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_27.run(buf239, primals_62, buf240, 2048, 512, grid=grid(2048), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_20], Original ATen: [aten.convolution] buf241 = extern_kernels.convolution(buf235, buf240, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf241, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf242 = buf232; del buf232 # reuse buf243 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf245 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [y_22], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_28.run(buf241, buf242, buf243, buf245, 128, 4096, grid=grid(128), stream=stream0) buf246 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) # Topologically Sorted Source Nodes: [y_22, add_26, y_23], Original ATen: [aten.native_group_norm, aten.add, aten.relu] triton_poi_fused_add_native_group_norm_relu_31.run(buf213, buf241, buf242, buf243, primals_63, primals_64, buf246, 524288, grid=grid(524288), stream=stream0) del primals_64 buf248 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf250 = reinterpret_tensor(buf248, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf248 # reuse buf251 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) # Topologically Sorted Source Nodes: [var_mean_21, sub_21, add_27, sqrt_21, w_21], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_30.run(buf250, primals_65, buf251, 512, 2048, grid=grid(512), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_21], Original ATen: [aten.convolution] buf252 = extern_kernels.convolution(buf246, buf251, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf252, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf253 = buf243; del buf243 # reuse buf254 = 
empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf256 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_21], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_25.run(buf252, buf253, buf254, buf256, 128, 1024, grid=grid(128), stream=stream0) buf257 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) # Topologically Sorted Source Nodes: [group_norm_21, y_24], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_26.run(buf252, buf253, buf254, primals_66, primals_67, buf257, 131072, grid=grid(131072), stream=stream0) del primals_67 buf259 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf261 = reinterpret_tensor(buf259, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf259 # reuse buf262 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Topologically Sorted Source Nodes: [var_mean_22, sub_22, add_28, sqrt_22, w_22], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_24.run(buf261, buf8, buf262, 512, 4608, grid=grid(512), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_22], Original ATen: [aten.convolution] buf263 = extern_kernels.convolution(buf257, buf262, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf263, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf264 = buf254; del buf254 # reuse buf265 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf267 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_22], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_25.run(buf263, buf264, buf265, buf267, 128, 1024, grid=grid(128), stream=stream0) buf268 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) # Topologically Sorted Source Nodes: [group_norm_22, y_25], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_26.run(buf263, buf264, buf265, primals_69, primals_70, buf268, 131072, grid=grid(131072), stream=stream0) del primals_70 buf270 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf272 = reinterpret_tensor(buf270, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf270 # reuse buf273 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) # Topologically Sorted Source Nodes: [var_mean_23, sub_23, add_29, sqrt_23, w_23], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_27.run(buf272, primals_71, buf273, 2048, 512, grid=grid(2048), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_23], Original ATen: [aten.convolution] buf274 = extern_kernels.convolution(buf268, buf273, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf274, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf275 = buf265; del buf265 # reuse buf276 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf278 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [y_26], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_28.run(buf274, buf275, buf276, buf278, 128, 4096, grid=grid(128), stream=stream0) buf279 = 
empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) # Topologically Sorted Source Nodes: [y_26, add_30, y_27], Original ATen: [aten.native_group_norm, aten.add, aten.relu] triton_poi_fused_add_native_group_norm_relu_31.run(buf246, buf274, buf275, buf276, primals_72, primals_73, buf279, 524288, grid=grid(524288), stream=stream0) del primals_73 buf281 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf283 = reinterpret_tensor(buf281, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf281 # reuse buf284 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) # Topologically Sorted Source Nodes: [var_mean_24, sub_24, add_31, sqrt_24, w_24], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_30.run(buf283, primals_74, buf284, 512, 2048, grid=grid(512), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_24], Original ATen: [aten.convolution] buf285 = extern_kernels.convolution(buf279, buf284, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf285, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf286 = buf276; del buf276 # reuse buf287 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf289 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_24], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_25.run(buf285, buf286, buf287, buf289, 128, 1024, grid=grid(128), stream=stream0) buf290 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) # Topologically Sorted Source Nodes: [group_norm_24, y_28], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_26.run(buf285, buf286, buf287, primals_75, primals_76, buf290, 131072, grid=grid(131072), stream=stream0) del primals_76 buf292 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf294 = reinterpret_tensor(buf292, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf292 # reuse buf295 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) # Topologically Sorted Source Nodes: [var_mean_25, sub_25, add_32, sqrt_25, w_25], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_24.run(buf294, buf9, buf295, 512, 4608, grid=grid(512), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_25], Original ATen: [aten.convolution] buf296 = extern_kernels.convolution(buf290, buf295, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf296, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf297 = buf287; del buf287 # reuse buf298 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf300 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_25], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_25.run(buf296, buf297, buf298, buf300, 128, 1024, grid=grid(128), stream=stream0) buf301 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) # Topologically Sorted Source Nodes: [group_norm_25, y_29], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_26.run(buf296, buf297, buf298, primals_78, primals_79, buf301, 131072, grid=grid(131072), stream=stream0) del primals_79 
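    # NOTE (reader annotation): each *_add_div_sqrt_sub_var_mean_* block below
    # standardizes a conv weight before use (Weight Standardization), matching
    # its "Source Nodes: [var_mean_*, sub_*, add_*, sqrt_*, w_*]" comment: the
    # weight is centered and scaled by per-output-channel statistics, then fed
    # to extern_kernels.convolution. Eager-mode sketch (illustrative; the exact
    # eps and variance correction live in the generated kernels and are
    # assumptions here):
    #
    #     var, mean = torch.var_mean(w, dim=[1, 2, 3], keepdim=True)
    #     w_std = (w - mean) / torch.sqrt(var + 1e-10)
    #     y = torch.nn.functional.conv2d(x, w_std, stride=s, padding=p)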
buf303 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf305 = reinterpret_tensor(buf303, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf303 # reuse buf306 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) # Topologically Sorted Source Nodes: [var_mean_26, sub_26, add_33, sqrt_26, w_26], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_27.run(buf305, primals_80, buf306, 2048, 512, grid=grid(2048), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_26], Original ATen: [aten.convolution] buf307 = extern_kernels.convolution(buf301, buf306, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf307, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf308 = buf298; del buf298 # reuse buf309 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf311 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [y_30], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_28.run(buf307, buf308, buf309, buf311, 128, 4096, grid=grid(128), stream=stream0) buf312 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) # Topologically Sorted Source Nodes: [y_30, add_34, y_31], Original ATen: [aten.native_group_norm, aten.add, aten.relu] triton_poi_fused_add_native_group_norm_relu_31.run(buf279, buf307, buf308, buf309, primals_81, primals_82, buf312, 524288, grid=grid(524288), stream=stream0) del primals_82 buf314 = reinterpret_tensor(buf34, (4096, 1, 1, 1), (1, 4096, 4096, 4096), 0); del buf34 # reuse buf316 = reinterpret_tensor(buf314, (4096, 1, 1, 1), (1, 1, 1, 1), 0); del buf314 # reuse buf317 = empty_strided_cuda((4096, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) # Topologically Sorted Source Nodes: [var_mean_27, sub_27, add_35, sqrt_27, w_27], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_32.run(buf316, primals_83, buf317, 4096, 2048, grid=grid(4096), stream=stream0) # Topologically Sorted Source Nodes: [residual_4], Original ATen: [aten.convolution] buf318 = extern_kernels.convolution(buf312, buf317, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf318, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf319 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 16384), torch.float32) buf320 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 16384), torch.float32) buf322 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 16384), torch.float32) # Topologically Sorted Source Nodes: [residual_5], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_33.run(buf318, buf319, buf320, buf322, 16384, 16, grid=grid(16384), stream=stream0) buf324 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf326 = reinterpret_tensor(buf324, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf324 # reuse buf327 = empty_strided_cuda((1024, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) # Topologically Sorted Source Nodes: [var_mean_28, sub_28, add_36, sqrt_28, w_28], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_34.run(buf326, primals_86, buf327, 1024, 2048, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_28], Original ATen: 
[aten.convolution] buf328 = extern_kernels.convolution(buf312, buf327, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf328, (4, 1024, 8, 8), (65536, 1, 8192, 1024)) buf329 = buf309; del buf309 # reuse buf330 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf332 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_28], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_35.run(buf328, buf329, buf330, buf332, 128, 2048, grid=grid(128), stream=stream0) buf333 = empty_strided_cuda((4, 1024, 8, 8), (65536, 1, 8192, 1024), torch.float32) # Topologically Sorted Source Nodes: [group_norm_28, y_32], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_36.run(buf328, buf329, buf330, primals_87, primals_88, buf333, 262144, grid=grid(262144), stream=stream0) del primals_88 buf335 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf337 = reinterpret_tensor(buf335, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf335 # reuse buf338 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_29, sub_29, add_37, sqrt_29, w_29], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_37.run(buf337, buf10, buf338, 1024, 9216, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_29], Original ATen: [aten.convolution] buf339 = extern_kernels.convolution(buf333, buf338, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf339, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf340 = buf330; del buf330 # reuse buf341 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf343 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_29], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_38.run(buf339, buf340, buf341, buf343, 128, 512, grid=grid(128), stream=stream0) buf344 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) # Topologically Sorted Source Nodes: [group_norm_29, y_33], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_39.run(buf339, buf340, buf341, primals_90, primals_91, buf344, 65536, grid=grid(65536), stream=stream0) del primals_91 buf346 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf348 = reinterpret_tensor(buf346, (4096, 1, 1, 1), (1, 1, 1, 1), 0); del buf346 # reuse buf349 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_30, sub_30, add_38, sqrt_30, w_30], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_40.run(buf348, primals_92, buf349, 4096, 1024, grid=grid(4096), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_30], Original ATen: [aten.convolution] buf350 = extern_kernels.convolution(buf344, buf349, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf350, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf351 = buf341; del buf341 # reuse buf352 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 
128), torch.float32) buf354 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [y_34], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_41.run(buf350, buf351, buf352, buf354, 128, 2048, grid=grid(128), stream=stream0) buf355 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096), torch.float32) buf356 = buf355; del buf355 # reuse # Topologically Sorted Source Nodes: [residual_5, y_34, add_39, y_35], Original ATen: [aten.native_group_norm, aten.add, aten.relu] triton_poi_fused_add_native_group_norm_relu_42.run(buf356, buf318, buf319, buf320, primals_84, primals_85, buf350, buf351, buf352, primals_93, primals_94, 262144, grid=grid(262144), stream=stream0) del buf320 del primals_85 del primals_94 buf358 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf360 = reinterpret_tensor(buf358, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf358 # reuse buf361 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32) # Topologically Sorted Source Nodes: [var_mean_31, sub_31, add_40, sqrt_31, w_31], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_43.run(buf360, primals_95, buf361, 1024, 4096, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_31], Original ATen: [aten.convolution] buf362 = extern_kernels.convolution(buf356, buf361, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf362, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf363 = buf352; del buf352 # reuse buf364 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf366 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_31], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_38.run(buf362, buf363, buf364, buf366, 128, 512, grid=grid(128), stream=stream0) buf367 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) # Topologically Sorted Source Nodes: [group_norm_31, y_36], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_39.run(buf362, buf363, buf364, primals_96, primals_97, buf367, 65536, grid=grid(65536), stream=stream0) del primals_97 buf369 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf371 = reinterpret_tensor(buf369, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf369 # reuse buf372 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_32, sub_32, add_41, sqrt_32, w_32], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_37.run(buf371, buf11, buf372, 1024, 9216, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_32], Original ATen: [aten.convolution] buf373 = extern_kernels.convolution(buf367, buf372, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf373, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf374 = buf364; del buf364 # reuse buf375 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf377 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_32], Original ATen: [aten.native_group_norm] 
triton_per_fused_native_group_norm_38.run(buf373, buf374, buf375, buf377, 128, 512, grid=grid(128), stream=stream0) buf378 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) # Topologically Sorted Source Nodes: [group_norm_32, y_37], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_39.run(buf373, buf374, buf375, primals_99, primals_100, buf378, 65536, grid=grid(65536), stream=stream0) del primals_100 buf380 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf382 = reinterpret_tensor(buf380, (4096, 1, 1, 1), (1, 1, 1, 1), 0); del buf380 # reuse buf383 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_33, sub_33, add_42, sqrt_33, w_33], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_40.run(buf382, primals_101, buf383, 4096, 1024, grid=grid(4096), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_33], Original ATen: [aten.convolution] buf384 = extern_kernels.convolution(buf378, buf383, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf384, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf385 = buf375; del buf375 # reuse buf386 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf388 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [y_38], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_41.run(buf384, buf385, buf386, buf388, 128, 2048, grid=grid(128), stream=stream0) buf389 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096), torch.float32) # Topologically Sorted Source Nodes: [y_38, add_43, y_39], Original ATen: [aten.native_group_norm, aten.add, aten.relu] triton_poi_fused_add_native_group_norm_relu_44.run(buf356, buf384, buf385, buf386, primals_102, primals_103, buf389, 262144, grid=grid(262144), stream=stream0) del primals_103 buf391 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf393 = reinterpret_tensor(buf391, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf391 # reuse buf394 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32) # Topologically Sorted Source Nodes: [var_mean_34, sub_34, add_44, sqrt_34, w_34], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_43.run(buf393, primals_104, buf394, 1024, 4096, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_34], Original ATen: [aten.convolution] buf395 = extern_kernels.convolution(buf389, buf394, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf395, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf396 = buf386; del buf386 # reuse buf397 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf399 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_34], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_38.run(buf395, buf396, buf397, buf399, 128, 512, grid=grid(128), stream=stream0) buf400 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) # Topologically Sorted Source Nodes: [group_norm_34, y_40], Original ATen: [aten.native_group_norm, aten.relu] 
triton_poi_fused_native_group_norm_relu_39.run(buf395, buf396, buf397, primals_105, primals_106, buf400, 65536, grid=grid(65536), stream=stream0) del primals_106 buf402 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf404 = reinterpret_tensor(buf402, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf402 # reuse buf405 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_35, sub_35, add_45, sqrt_35, w_35], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_37.run(buf404, buf12, buf405, 1024, 9216, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_35], Original ATen: [aten.convolution] buf406 = extern_kernels.convolution(buf400, buf405, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf406, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf407 = buf397; del buf397 # reuse buf408 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf410 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_35], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_38.run(buf406, buf407, buf408, buf410, 128, 512, grid=grid(128), stream=stream0) buf411 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) # Topologically Sorted Source Nodes: [group_norm_35, y_41], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_39.run(buf406, buf407, buf408, primals_108, primals_109, buf411, 65536, grid=grid(65536), stream=stream0) del primals_109 buf413 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf415 = reinterpret_tensor(buf413, (4096, 1, 1, 1), (1, 1, 1, 1), 0); del buf413 # reuse buf416 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_36, sub_36, add_46, sqrt_36, w_36], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_40.run(buf415, primals_110, buf416, 4096, 1024, grid=grid(4096), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_36], Original ATen: [aten.convolution] buf417 = extern_kernels.convolution(buf411, buf416, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf417, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf418 = buf408; del buf408 # reuse buf419 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf421 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [y_42], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_41.run(buf417, buf418, buf419, buf421, 128, 2048, grid=grid(128), stream=stream0) buf422 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096), torch.float32) # Topologically Sorted Source Nodes: [y_42, add_47, y_43], Original ATen: [aten.native_group_norm, aten.add, aten.relu] triton_poi_fused_add_native_group_norm_relu_44.run(buf389, buf417, buf418, buf419, primals_111, primals_112, buf422, 262144, grid=grid(262144), stream=stream0) del primals_112 buf424 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf426 = reinterpret_tensor(buf424, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del 
buf424 # reuse buf427 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32) # Topologically Sorted Source Nodes: [var_mean_37, sub_37, add_48, sqrt_37, w_37], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_43.run(buf426, primals_113, buf427, 1024, 4096, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_37], Original ATen: [aten.convolution] buf428 = extern_kernels.convolution(buf422, buf427, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf428, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf429 = buf419; del buf419 # reuse buf430 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf432 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_37], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_38.run(buf428, buf429, buf430, buf432, 128, 512, grid=grid(128), stream=stream0) buf433 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) # Topologically Sorted Source Nodes: [group_norm_37, y_44], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_39.run(buf428, buf429, buf430, primals_114, primals_115, buf433, 65536, grid=grid(65536), stream=stream0) del primals_115 buf435 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf437 = reinterpret_tensor(buf435, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf435 # reuse buf438 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_38, sub_38, add_49, sqrt_38, w_38], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_red_fused_add_div_sqrt_sub_var_mean_37.run(buf437, buf13, buf438, 1024, 9216, grid=grid(1024), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_38], Original ATen: [aten.convolution] buf439 = extern_kernels.convolution(buf433, buf438, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf439, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf440 = buf430; del buf430 # reuse buf441 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf443 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [group_norm_38], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_38.run(buf439, buf440, buf441, buf443, 128, 512, grid=grid(128), stream=stream0) buf444 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) # Topologically Sorted Source Nodes: [group_norm_38, y_45], Original ATen: [aten.native_group_norm, aten.relu] triton_poi_fused_native_group_norm_relu_39.run(buf439, buf440, buf441, primals_117, primals_118, buf444, 65536, grid=grid(65536), stream=stream0) del primals_118 buf446 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf448 = reinterpret_tensor(buf446, (4096, 1, 1, 1), (1, 1, 1, 1), 0); del buf446 # reuse buf449 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) # Topologically Sorted Source Nodes: [var_mean_39, sub_39, add_50, sqrt_39, w_39], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] triton_per_fused_add_div_sqrt_sub_var_mean_40.run(buf448, primals_119, buf449, 4096, 
1024, grid=grid(4096), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_39], Original ATen: [aten.convolution] buf450 = extern_kernels.convolution(buf444, buf449, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf450, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf451 = buf441; del buf441 # reuse buf452 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) buf454 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32) # Topologically Sorted Source Nodes: [y_46], Original ATen: [aten.native_group_norm] triton_red_fused_native_group_norm_41.run(buf450, buf451, buf452, buf454, 128, 2048, grid=grid(128), stream=stream0) buf455 = empty_strided_cuda((4, 4096, 4, 4), (65536, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [y_46, add_51, y_47], Original ATen: [aten.native_group_norm, aten.add, aten.relu] triton_poi_fused_add_native_group_norm_relu_45.run(buf422, buf450, buf451, buf452, primals_120, primals_121, buf455, 64, 4096, grid=grid(64, 4096), stream=stream0) del buf452 del primals_121 buf456 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096), torch.bool) # Topologically Sorted Source Nodes: [], Original ATen: [aten.threshold_backward] triton_poi_fused_threshold_backward_46.run(buf455, buf456, 16384, 16, grid=grid(16384, 16), stream=stream0) return (buf455, buf0, buf1, primals_3, primals_5, primals_6, primals_8, primals_9, buf2, primals_12, primals_14, primals_15, primals_17, primals_18, buf3, primals_21, primals_23, primals_24, primals_26, primals_27, buf4, primals_30, primals_32, primals_33, primals_35, primals_36, buf5, primals_39, primals_41, primals_42, primals_44, primals_45, primals_47, primals_48, buf6, primals_51, primals_53, primals_54, primals_56, primals_57, buf7, primals_60, primals_62, primals_63, primals_65, primals_66, buf8, primals_69, primals_71, primals_72, primals_74, primals_75, buf9, primals_78, primals_80, primals_81, primals_83, primals_84, primals_86, primals_87, buf10, primals_90, primals_92, primals_93, primals_95, primals_96, buf11, primals_99, primals_101, primals_102, primals_104, primals_105, buf12, primals_108, primals_110, primals_111, primals_113, primals_114, buf13, primals_117, primals_119, primals_120, buf17, buf18, buf19, reinterpret_tensor(buf20, (4, 32), (32, 1), 0), reinterpret_tensor(buf23, (4, 32), (32, 1), 0), buf24, buf25, buf26, buf30, buf31, buf32, reinterpret_tensor(buf33, (4, 1024), (1024, 1), 0), reinterpret_tensor(buf36, (4, 1024), (1024, 1), 0), buf40, buf41, buf42, reinterpret_tensor(buf43, (4, 32), (32, 1), 0), reinterpret_tensor(buf46, (4, 32), (32, 1), 0), buf47, buf51, buf52, buf53, reinterpret_tensor(buf54, (4, 32), (32, 1), 0), reinterpret_tensor(buf57, (4, 32), (32, 1), 0), buf58, buf62, buf63, buf64, reinterpret_tensor(buf65, (4, 32), (32, 1), 0), reinterpret_tensor(buf68, (4, 32), (32, 1), 0), buf70, buf74, buf75, buf76, reinterpret_tensor(buf77, (4, 32), (32, 1), 0), reinterpret_tensor(buf80, (4, 32), (32, 1), 0), buf81, buf85, buf86, buf87, reinterpret_tensor(buf88, (4, 32), (32, 1), 0), reinterpret_tensor(buf91, (4, 32), (32, 1), 0), buf92, buf96, buf97, buf98, reinterpret_tensor(buf99, (4, 32), (32, 1), 0), reinterpret_tensor(buf102, (4, 32), (32, 1), 0), buf103, buf107, buf108, buf109, reinterpret_tensor(buf110, (4, 32), (32, 1), 0), reinterpret_tensor(buf113, (4, 32), (32, 1), 0), buf114, buf118, buf119, buf120, reinterpret_tensor(buf121, (4, 32), (32, 1), 0), 
reinterpret_tensor(buf124, (4, 32), (32, 1), 0), buf125, buf129, buf130, buf131, reinterpret_tensor(buf132, (4, 32), (32, 1), 0), reinterpret_tensor(buf135, (4, 32), (32, 1), 0), buf136, buf140, buf141, buf142, reinterpret_tensor(buf143, (4, 32), (32, 1), 0), reinterpret_tensor(buf146, (4, 32), (32, 1), 0), buf147, buf151, buf152, buf153, reinterpret_tensor(buf154, (4, 32), (32, 1), 0), reinterpret_tensor(buf157, (4, 32), (32, 1), 0), buf158, buf162, buf163, buf164, reinterpret_tensor(buf165, (4, 32), (32, 1), 0), reinterpret_tensor(buf168, (4, 32), (32, 1), 0), buf169, buf173, buf174, buf175, reinterpret_tensor(buf176, (4, 2048), (2048, 1), 0), reinterpret_tensor(buf179, (4, 2048), (2048, 1), 0), buf183, buf184, buf185, reinterpret_tensor(buf186, (4, 32), (32, 1), 0), reinterpret_tensor(buf189, (4, 32), (32, 1), 0), buf190, buf194, buf195, buf196, reinterpret_tensor(buf197, (4, 32), (32, 1), 0), reinterpret_tensor(buf200, (4, 32), (32, 1), 0), buf201, buf205, buf206, buf207, reinterpret_tensor(buf208, (4, 32), (32, 1), 0), reinterpret_tensor(buf211, (4, 32), (32, 1), 0), buf213, buf217, buf218, buf219, reinterpret_tensor(buf220, (4, 32), (32, 1), 0), reinterpret_tensor(buf223, (4, 32), (32, 1), 0), buf224, buf228, buf229, buf230, reinterpret_tensor(buf231, (4, 32), (32, 1), 0), reinterpret_tensor(buf234, (4, 32), (32, 1), 0), buf235, buf239, buf240, buf241, reinterpret_tensor(buf242, (4, 32), (32, 1), 0), reinterpret_tensor(buf245, (4, 32), (32, 1), 0), buf246, buf250, buf251, buf252, reinterpret_tensor(buf253, (4, 32), (32, 1), 0), reinterpret_tensor(buf256, (4, 32), (32, 1), 0), buf257, buf261, buf262, buf263, reinterpret_tensor(buf264, (4, 32), (32, 1), 0), reinterpret_tensor(buf267, (4, 32), (32, 1), 0), buf268, buf272, buf273, buf274, reinterpret_tensor(buf275, (4, 32), (32, 1), 0), reinterpret_tensor(buf278, (4, 32), (32, 1), 0), buf279, buf283, buf284, buf285, reinterpret_tensor(buf286, (4, 32), (32, 1), 0), reinterpret_tensor(buf289, (4, 32), (32, 1), 0), buf290, buf294, buf295, buf296, reinterpret_tensor(buf297, (4, 32), (32, 1), 0), reinterpret_tensor(buf300, (4, 32), (32, 1), 0), buf301, buf305, buf306, buf307, reinterpret_tensor(buf308, (4, 32), (32, 1), 0), reinterpret_tensor(buf311, (4, 32), (32, 1), 0), buf312, buf316, buf317, buf318, reinterpret_tensor(buf319, (4, 4096), (4096, 1), 0), reinterpret_tensor(buf322, (4, 4096), (4096, 1), 0), buf326, buf327, buf328, reinterpret_tensor(buf329, (4, 32), (32, 1), 0), reinterpret_tensor(buf332, (4, 32), (32, 1), 0), buf333, buf337, buf338, buf339, reinterpret_tensor(buf340, (4, 32), (32, 1), 0), reinterpret_tensor(buf343, (4, 32), (32, 1), 0), buf344, buf348, buf349, buf350, reinterpret_tensor(buf351, (4, 32), (32, 1), 0), reinterpret_tensor(buf354, (4, 32), (32, 1), 0), buf356, buf360, buf361, buf362, reinterpret_tensor(buf363, (4, 32), (32, 1), 0), reinterpret_tensor(buf366, (4, 32), (32, 1), 0), buf367, buf371, buf372, buf373, reinterpret_tensor(buf374, (4, 32), (32, 1), 0), reinterpret_tensor(buf377, (4, 32), (32, 1), 0), buf378, buf382, buf383, buf384, reinterpret_tensor(buf385, (4, 32), (32, 1), 0), reinterpret_tensor(buf388, (4, 32), (32, 1), 0), buf389, buf393, buf394, buf395, reinterpret_tensor(buf396, (4, 32), (32, 1), 0), reinterpret_tensor(buf399, (4, 32), (32, 1), 0), buf400, buf404, buf405, buf406, reinterpret_tensor(buf407, (4, 32), (32, 1), 0), reinterpret_tensor(buf410, (4, 32), (32, 1), 0), buf411, buf415, buf416, buf417, reinterpret_tensor(buf418, (4, 32), (32, 1), 0), reinterpret_tensor(buf421, (4, 32), (32, 1), 
0), buf422, buf426, buf427, buf428, reinterpret_tensor(buf429, (4, 32), (32, 1), 0), reinterpret_tensor(buf432, (4, 32), (32, 1), 0), buf433, buf437, buf438, buf439, reinterpret_tensor(buf440, (4, 32), (32, 1), 0), reinterpret_tensor(buf443, (4, 32), (32, 1), 0), buf444, buf448, buf449, buf450, reinterpret_tensor(buf451, (4, 32), (32, 1), 0), reinterpret_tensor(buf454, (4, 32), (32, 1), 0), buf456, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((256, 3, 7, 7), (147, 49, 7, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((256, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((256, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((256, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_29 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_30 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_31 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_32 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_33 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_34 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_35 = rand_strided((256, 1024, 1, 1), 
(1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_36 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_37 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_38 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_39 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_40 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_41 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_42 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_43 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_44 = rand_strided((2048, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_45 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_46 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_47 = rand_strided((512, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_48 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_49 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_50 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_51 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_52 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_53 = rand_strided((2048, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_54 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_55 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_56 = rand_strided((512, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_57 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_58 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_59 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_60 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_61 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_62 = rand_strided((2048, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_63 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_64 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_65 = rand_strided((512, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_66 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_67 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_68 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_69 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_70 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_71 = rand_strided((2048, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_72 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_73 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_74 = rand_strided((512, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_75 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_76 = rand_strided((512, ), 
(1, ), device='cuda:0', dtype=torch.float32) primals_77 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_78 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_79 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_80 = rand_strided((2048, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_81 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_82 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_83 = rand_strided((4096, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_84 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_85 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_86 = rand_strided((1024, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_87 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_88 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_89 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_90 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_91 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_92 = rand_strided((4096, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_93 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_94 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_95 = rand_strided((1024, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_96 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_97 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_98 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_99 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_100 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_101 = rand_strided((4096, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_102 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_103 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_104 = rand_strided((1024, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_105 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_106 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_107 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_108 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_109 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_110 = rand_strided((4096, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_111 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_112 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_113 = rand_strided((1024, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_114 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_115 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_116 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', 
dtype=torch.float32) primals_117 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_118 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_119 = rand_strided((4096, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_120 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) primals_121 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
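# ---------------------------------------------------------------------------
# Note on the fused weight-standardization kernels launched throughout the
# `call` graph above: every `triton_{red,per}_fused_add_div_sqrt_sub_var_mean_*`
# launch normalizes a convolution weight before it is handed to
# `extern_kernels.convolution`. A minimal eager-mode sketch of the same
# arithmetic follows; the helper name and the example shape are ours for
# illustration, not part of the generated code. It mirrors StdConv2d in the
# source module reproduced below.

import torch


def standardize_weight(w: torch.Tensor, eps: float = 1e-05) -> torch.Tensor:
    # Per output channel: biased mean/variance over (cin, kh, kw), then
    # (w - mean) / sqrt(var + eps) -- the same computation the fused kernels
    # perform with tl.sum / welford reductions and libdevice.sqrt.
    v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
    return (w - m) / torch.sqrt(v + eps)


# Illustrative: the shape of the 3x3 weights standardized ahead of conv2d_29.
w = torch.randn(1024, 1024, 3, 3)
assert standardize_weight(w).shape == w.shape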
import torch
import torch.nn.functional as F
from collections import OrderedDict
import torch.nn as nn


def conv1x1(cin, cout, stride=1, bias=False):
    return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0,
        bias=bias)


def conv3x3(cin, cout, stride=1, groups=1, bias=False):
    return StdConv2d(cin, cout, kernel_size=3, stride=stride, padding=1,
        bias=bias, groups=groups)


def np2th(weights, conv=False):
    """Possibly convert HWIO to OIHW."""
    if conv:
        weights = weights.transpose([3, 2, 0, 1])
    return torch.from_numpy(weights)


class StdConv2d(nn.Conv2d):

    def forward(self, x):
        w = self.weight
        v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
        w = (w - m) / torch.sqrt(v + 1e-05)
        return F.conv2d(x, w, self.bias, self.stride, self.padding,
            self.dilation, self.groups)


class PreActBottleneck(nn.Module):
    """Pre-activation (v2) bottleneck block."""

    def __init__(self, cin, cout=None, cmid=None, stride=1):
        super().__init__()
        cout = cout or cin
        cmid = cmid or cout // 4
        self.gn1 = nn.GroupNorm(32, cmid, eps=1e-06)
        self.conv1 = conv1x1(cin, cmid, bias=False)
        self.gn2 = nn.GroupNorm(32, cmid, eps=1e-06)
        self.conv2 = conv3x3(cmid, cmid, stride, bias=False)
        self.gn3 = nn.GroupNorm(32, cout, eps=1e-06)
        self.conv3 = conv1x1(cmid, cout, bias=False)
        self.relu = nn.ReLU(inplace=True)
        if stride != 1 or cin != cout:
            self.downsample = conv1x1(cin, cout, stride, bias=False)
            self.gn_proj = nn.GroupNorm(cout, cout)

    def forward(self, x):
        residual = x
        if hasattr(self, 'downsample'):
            residual = self.downsample(x)
            residual = self.gn_proj(residual)
        y = self.relu(self.gn1(self.conv1(x)))
        y = self.relu(self.gn2(self.conv2(y)))
        y = self.gn3(self.conv3(y))
        y = self.relu(residual + y)
        return y

    def load_from(self, weights, n_block, n_unit):
        conv1_weight = np2th(weights[n_block + '/' + n_unit + '/' +
            'conv1/kernel'], conv=True)
        conv2_weight = np2th(weights[n_block + '/' + n_unit + '/' +
            'conv2/kernel'], conv=True)
        conv3_weight = np2th(weights[n_block + '/' + n_unit + '/' +
            'conv3/kernel'], conv=True)
        gn1_weight = np2th(weights[n_block + '/' + n_unit + '/' + 'gn1/scale'])
        gn1_bias = np2th(weights[n_block + '/' + n_unit + '/' + 'gn1/bias'])
        gn2_weight = np2th(weights[n_block + '/' + n_unit + '/' + 'gn2/scale'])
        gn2_bias = np2th(weights[n_block + '/' + n_unit + '/' + 'gn2/bias'])
        gn3_weight = np2th(weights[n_block + '/' + n_unit + '/' + 'gn3/scale'])
        gn3_bias = np2th(weights[n_block + '/' + n_unit + '/' + 'gn3/bias'])
        self.conv1.weight.copy_(conv1_weight)
        self.conv2.weight.copy_(conv2_weight)
        self.conv3.weight.copy_(conv3_weight)
        self.gn1.weight.copy_(gn1_weight.view(-1))
        self.gn1.bias.copy_(gn1_bias.view(-1))
        self.gn2.weight.copy_(gn2_weight.view(-1))
        self.gn2.bias.copy_(gn2_bias.view(-1))
        self.gn3.weight.copy_(gn3_weight.view(-1))
        self.gn3.bias.copy_(gn3_bias.view(-1))
        if hasattr(self, 'downsample'):
            proj_conv_weight = np2th(weights[n_block + '/' + n_unit + '/' +
                'conv_proj/kernel'], conv=True)
            proj_gn_weight = np2th(weights[n_block + '/' + n_unit + '/' +
                'gn_proj/scale'])
            proj_gn_bias = np2th(weights[n_block + '/' + n_unit + '/' +
                'gn_proj/bias'])
            self.downsample.weight.copy_(proj_conv_weight)
            self.gn_proj.weight.copy_(proj_gn_weight.view(-1))
            self.gn_proj.bias.copy_(proj_gn_bias.view(-1))


class ResNetV2(nn.Module):
    """Implementation of Pre-activation (v2) ResNet model."""

    def __init__(self, block_units, width_factor):
        super().__init__()
        width = int(64 * width_factor)
        self.width = width
        self.root = nn.Sequential(OrderedDict([
            ('conv', StdConv2d(3, width, kernel_size=7, stride=2,
                bias=False, padding=3)),
            ('gn', nn.GroupNorm(32, width, eps=1e-06)),
            ('relu', nn.ReLU(inplace=True)),
            ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0)),
        ]))
        self.body = nn.Sequential(OrderedDict([
            ('block1', nn.Sequential(OrderedDict(
                [('unit1', PreActBottleneck(cin=width, cout=width * 4,
                    cmid=width))] +
                [(f'unit{i:d}', PreActBottleneck(cin=width * 4,
                    cout=width * 4, cmid=width))
                    for i in range(2, block_units[0] + 1)]))),
            ('block2', nn.Sequential(OrderedDict(
                [('unit1', PreActBottleneck(cin=width * 4, cout=width * 8,
                    cmid=width * 2, stride=2))] +
                [(f'unit{i:d}', PreActBottleneck(cin=width * 8,
                    cout=width * 8, cmid=width * 2))
                    for i in range(2, block_units[1] + 1)]))),
            ('block3', nn.Sequential(OrderedDict(
                [('unit1', PreActBottleneck(cin=width * 8, cout=width * 16,
                    cmid=width * 4, stride=2))] +
                [(f'unit{i:d}', PreActBottleneck(cin=width * 16,
                    cout=width * 16, cmid=width * 4))
                    for i in range(2, block_units[2] + 1)]))),
        ]))

    def forward(self, x):
        x = self.root(x)
        x = self.body(x)
        return x


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {'block_units': [4, 4, 4], 'width_factor': 4}]
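# ---------------------------------------------------------------------------
# Hypothetical smoke test (ours, not part of the captured source): build the
# model with the arguments that get_init_inputs() supplies and confirm the
# eager forward pass produces the same output shape as buf455, the first
# tensor returned by the compiled graph above.

if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    model = ResNetV2(*init_args, **init_kwargs).eval()
    with torch.no_grad():
        out = model(*get_inputs())
    # 64x64 input -> stride-2 stem conv (32x32) -> 3x3/s2 max pool (15x15)
    # -> block2 and block3 each halve the grid (8x8, then 4x4) at
    # 16 * width = 4096 channels, i.e. torch.Size([4, 4096, 4, 4]).
    print(out.shape)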
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn.functional as F from collections import OrderedDict import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 768 xnumel = 49 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 147 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 1024 y1 = yindex // 1024 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 1024 * x2 + 9216 * y1), tmp0, xmask) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_5(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 256 rnumel = 147 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 147 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(rmask & xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask & xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 147, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(rmask & xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 147.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.sqrt(tmp20) tmp22 = tmp0 - tmp10 tmp23 = tmp22 / tmp21 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp21, xmask) tl.store(out_ptr1 + (r1 + 147 * x0), tmp23, rmask & xmask) @triton.jit def triton_red_fused_native_group_norm_6(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 8 r3 = rindex // 8 tmp0 = tl.load(in_ptr0 + (r2 + 8 * x0 + 256 * r3 + 262144 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 8192.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 256 x2 = xindex // 262144 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 8), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 8), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 8192.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 230400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 256 x1 = xindex // 256 % 15 x2 = xindex // 3840 % 15 x3 = xindex // 57600 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp3 = tl.load(in_ptr0 + (512 + x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp5 = tl.load(in_ptr0 + (8192 + x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp7 = tl.load(in_ptr0 + (8448 + x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp9 = tl.load(in_ptr0 + (8704 + x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp11 = tl.load(in_ptr0 + (16384 + x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp13 = tl.load(in_ptr0 + (16640 + x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp15 = tl.load(in_ptr0 + (16896 + x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp17 = tmp1 > tmp0 tmp18 = tl.full([1], 1, tl.int8) tmp19 = tl.full([1], 0, tl.int8) tmp20 = tl.where(tmp17, tmp18, tmp19) tmp21 = tmp3 > tmp2 tmp22 = tl.full([1], 2, tl.int8) tmp23 = tl.where(tmp21, tmp22, tmp20) tmp24 = tmp5 > tmp4 tmp25 = tl.full([1], 3, tl.int8) tmp26 = tl.where(tmp24, tmp25, tmp23) tmp27 = tmp7 > tmp6 tmp28 = tl.full([1], 4, tl.int8) tmp29 = tl.where(tmp27, tmp28, tmp26) tmp30 = tmp9 > tmp8 tmp31 = tl.full([1], 5, tl.int8) tmp32 = tl.where(tmp30, tmp31, tmp29) tmp33 = tmp11 > tmp10 tmp34 = tl.full([1], 6, tl.int8) tmp35 = tl.where(tmp33, 
tmp34, tmp32) tmp36 = tmp13 > tmp12 tmp37 = tl.full([1], 7, tl.int8) tmp38 = tl.where(tmp36, tmp37, tmp35) tmp39 = tmp15 > tmp14 tmp40 = tl.full([1], 8, tl.int8) tmp41 = tl.where(tmp39, tmp40, tmp38) tl.store(out_ptr0 + x4, tmp16, xmask) tl.store(out_ptr1 + x4, tmp41, xmask) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_9(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 256.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 256 * x0), tmp20, None) @triton.jit def triton_per_fused_native_group_norm_10(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): rnumel = 225 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r2 = rindex x0 = xindex % 1024 x1 = xindex // 1024 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 1024 * r2 + 230400 * x1), rmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(rmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 225, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(rmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = 225.0 tmp18 = tmp16 / tmp17 tmp19 = 1e-05 tmp20 = tmp18 + tmp19 tmp21 = libdevice.rsqrt(tmp20) tl.store(out_ptr2 + x3, tmp21, None) tl.store(out_ptr0 + x3, tmp10, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_11(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 256.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 256 * x0), tmp20, None) @triton.jit def triton_red_fused_native_group_norm_12(in_ptr0, out_ptr0, out_ptr1, out_ptr2, 
xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 1800 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 8 r3 = rindex // 8 tmp0 = tl.load(in_ptr0 + (r2 + 8 * x0 + 256 * r3 + 57600 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 1800.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_13(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 230400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 256 x2 = xindex // 57600 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 8), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 8), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 1800.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_14(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 256 rnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 2304 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 2304.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 2304 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 2304 * x0), tmp12, rmask & xmask) @triton.jit def triton_red_fused_native_group_norm_15(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 7200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 32 r3 = rindex // 32 tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 230400 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 7200.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_add_native_group_norm_relu_16(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = xindex // 230400 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (x0 + 1024 * x2), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 1024 * x2), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr5 + x3, None) tmp15 = tl.load(in_ptr6 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr7 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp24 = tl.load(in_ptr8 + x0, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr9 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 225.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = 
tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp16 = tmp14 - tmp15 tmp18 = 7200.0 tmp19 = tmp17 / tmp18 tmp20 = 1e-06 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp16 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tmp13 + tmp27 tmp29 = tl.full([1], 0, tl.int32) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(in_out_ptr0 + x3, tmp30, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_17(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None) @triton.jit def triton_poi_fused_add_native_group_norm_relu_18(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = xindex // 230400 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x3, None) tmp2 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr3 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = 7200.0 tmp6 = tmp4 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp3 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tmp15 = tmp0 + tmp14 tmp16 = tl.full([1], 0, tl.int32) tmp17 = triton_helpers.maximum(tmp16, tmp15) tl.store(out_ptr0 + x3, tmp17, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_19(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None) @triton.jit def triton_per_fused_native_group_norm_20(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: 
tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 2048 x1 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 2048 * r2 + 131072 * x1), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.full([XBLOCK, 1], 64, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp14 = 64.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + x3, tmp18, None) tl.store(out_ptr0 + x3, tmp8, None) tl.store(out_ptr1 + x3, tmp13, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_21(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None) @triton.jit def triton_red_fused_native_group_norm_22(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 3600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 16 r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 512 * r3 + 115200 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 3600.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_23(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 512 x2 = xindex // 115200 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 3600.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_24(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 512 rnumel = 4608 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 4608 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 4608.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 4608 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 4608 * x0), tmp12, rmask & xmask) @triton.jit def triton_per_fused_native_group_norm_25(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex % 16 r3 = rindex // 16 x0 = xindex % 32 x1 = xindex // 32 x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 512 * r3 + 32768 * x1), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-06 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + x4, tmp18, None) tl.store(out_ptr0 + x4, tmp8, None) tl.store(out_ptr1 + x4, tmp13, None) @triton.jit def triton_poi_fused_native_group_norm_relu_26(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 512 x2 = xindex // 32768 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 1024.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_27(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 512 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 512, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = 
tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 512.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 512 * x0), tmp20, None) @triton.jit def triton_red_fused_native_group_norm_28(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 64 r3 = rindex // 64 tmp0 = tl.load(in_ptr0 + (r2 + 64 * x0 + 2048 * r3 + 131072 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 4096.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_add_native_group_norm_relu_29(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 2048 x2 = xindex // 131072 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (x0 + 2048 * x2), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 2048 * x2), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr5 + x3, None) tmp15 = tl.load(in_ptr6 + (32 * x2 + x0 // 64), None, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr7 + (32 * x2 + x0 // 64), None, eviction_policy= 'evict_last') tmp24 = tl.load(in_ptr8 + x0, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr9 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 64.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp16 = tmp14 - tmp15 tmp18 = 4096.0 tmp19 = tmp17 / tmp18 tmp20 = 1e-06 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp16 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tmp13 + tmp27 tmp29 = tl.full([1], 0, tl.int32) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(in_out_ptr0 + x3, tmp30, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_30(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 
512 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask & xmask) @triton.jit def triton_poi_fused_add_native_group_norm_relu_31(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 2048 x2 = xindex // 131072 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x3, None) tmp2 = tl.load(in_ptr2 + (32 * x2 + x0 // 64), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr3 + (32 * x2 + x0 // 64), None, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = 4096.0 tmp6 = tmp4 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp3 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tmp15 = tmp0 + tmp14 tmp16 = tl.full([1], 0, tl.int32) tmp17 = triton_helpers.maximum(tmp16, tmp15) tl.store(out_ptr0 + x3, tmp17, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_32(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy= 'evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, None) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy= 'evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask) @triton.jit def triton_per_fused_native_group_norm_33(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 4096 x1 = xindex // 4096 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 65536 * x1), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.full([XBLOCK, 1], 16, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp14 = 16.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + x3, tmp18, None) tl.store(out_ptr0 + x3, tmp8, None) tl.store(out_ptr1 + x3, tmp13, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_34(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 1024 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask & xmask) @triton.jit def triton_red_fused_native_group_norm_35(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 32 r3 = rindex // 32 tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 65536 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_native_group_norm_relu_36(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = xindex // 65536 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 2048.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_37(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 1024 rnumel = 9216 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 9216 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 9216.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 9216 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 9216 * x0), tmp12, rmask & xmask) @triton.jit def triton_per_fused_native_group_norm_38(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 512 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex % 32 r3 = rindex // 32 x0 = xindex % 32 x1 = xindex // 32 x4 = xindex tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 16384 * x1), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 512, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 512.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-06 tmp17 = tmp15 + tmp16 tmp18 = libdevice.rsqrt(tmp17) tl.store(out_ptr2 + x4, tmp18, None) tl.store(out_ptr0 + x4, tmp8, None) tl.store(out_ptr1 + x4, tmp13, None) @triton.jit def triton_poi_fused_native_group_norm_relu_39(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 1024 x2 = xindex // 16384 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 512.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp14 = tl.full([1], 0, tl.int32) tmp15 = triton_helpers.maximum(tmp14, tmp13) tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_40(in_out_ptr0, in_ptr0, out_ptr1, xnumel, 
rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 1024, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 1024.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp18, None) tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None) @triton.jit def triton_red_fused_native_group_norm_41(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 128 rnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 32 x1 = xindex // 32 tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) x4 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex % 128 r3 = rindex // 128 tmp0 = tl.load(in_ptr0 + (r2 + 128 * x0 + 4096 * r3 + 65536 * x1), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tl.store(out_ptr0 + x4, tmp2, xmask) tl.store(out_ptr1 + x4, tmp3, xmask) tmp5 = 2048.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tl.store(out_ptr2 + x4, tmp9, xmask) @triton.jit def triton_poi_fused_add_native_group_norm_relu_42(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 4096 x2 = xindex // 65536 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (x0 + 4096 * x2), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 4096 * x2), None, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr5 + x3, None) tmp15 = tl.load(in_ptr6 + (32 * x2 + x0 // 128), None, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr7 + (32 * x2 + x0 // 128), None, eviction_policy= 'evict_last') tmp24 = tl.load(in_ptr8 + x0, None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr9 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 16.0 tmp5 = tmp3 / tmp4 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp2 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tmp16 = tmp14 - tmp15 tmp18 = 2048.0 tmp19 = tmp17 / tmp18 tmp20 = 1e-06 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp16 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tmp13 + tmp27 tmp29 = tl.full([1], 0, tl.int32) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(in_out_ptr0 + x3, tmp30, None) @triton.jit def triton_red_fused_add_div_sqrt_sub_var_mean_43(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 1024 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32) tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers. 
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0) ) tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean) tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2) tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight) tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean, tmp2_m2, tmp2_weight, 1) tmp2 = tmp2_tmp[:, None] tmp3 = tmp3_tmp[:, None] tmp4_tmp[:, None] tmp5 = 4096.0 tmp6 = tmp3 / tmp5 tmp7 = 1e-05 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tmp10 - tmp2 tmp12 = tmp11 / tmp9 tl.store(out_ptr1 + (r1 + 4096 * x0), tmp12, rmask & xmask) @triton.jit def triton_poi_fused_add_native_group_norm_relu_44(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 4096 x2 = xindex // 65536 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x3, None) tmp2 = tl.load(in_ptr2 + (32 * x2 + x0 // 128), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr3 + (32 * x2 + x0 // 128), None, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = 2048.0 tmp6 = tmp4 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp3 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tmp15 = tmp0 + tmp14 tmp16 = tl.full([1], 0, tl.int32) tmp17 = triton_helpers.maximum(tmp16, tmp15) tl.store(out_ptr0 + x3, tmp17, None) @triton.jit def triton_poi_fused_add_native_group_norm_relu_45(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y1 = yindex // 16 y0 = yindex % 16 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr2 + (32 * y1 + x2 // 128), ymask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr3 + (32 * y1 + x2 // 128), ymask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr5 + x2, None, eviction_policy='evict_last') tmp3 = tmp1 - tmp2 tmp5 = 2048.0 tmp6 = tmp4 / tmp5 tmp7 = 1e-06 tmp8 = tmp6 + tmp7 tmp9 = libdevice.rsqrt(tmp8) tmp10 = tmp3 * tmp9 tmp12 = tmp10 * tmp11 tmp14 = tmp12 + tmp13 tmp15 = tmp0 + tmp14 tmp16 = tl.full([1, 1], 0, tl.int32) tmp17 = triton_helpers.maximum(tmp16, tmp15) tl.store(out_ptr0 + (y0 + 16 * x2 + 65536 * y1), tmp17, ymask) @triton.jit def triton_poi_fused_threshold_backward_46(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = 
xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4096 y1 = yindex // 4096 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy= 'evict_last') tmp1 = 0.0 tmp2 = tmp0 <= tmp1 tl.store(out_ptr0 + (y0 + 4096 * x2 + 65536 * y1), tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121) = args args.clear() assert_size_stride(primals_1, (256, 3, 7, 7), (147, 49, 7, 1)) assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_3, (256,), (1,)) assert_size_stride(primals_4, (256,), (1,)) assert_size_stride(primals_5, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_6, (1024,), (1,)) assert_size_stride(primals_7, (1024,), (1,)) assert_size_stride(primals_8, (256, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (256,), (1,)) assert_size_stride(primals_11, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_12, (256,), (1,)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_15, (1024,), (1,)) assert_size_stride(primals_16, (1024,), (1,)) assert_size_stride(primals_17, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_18, (256,), (1,)) assert_size_stride(primals_19, (256,), (1,)) assert_size_stride(primals_20, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_21, (256,), (1,)) assert_size_stride(primals_22, (256,), (1,)) assert_size_stride(primals_23, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_24, (1024,), (1,)) assert_size_stride(primals_25, (1024,), (1,)) assert_size_stride(primals_26, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_27, (256,), (1,)) assert_size_stride(primals_28, (256,), (1,)) assert_size_stride(primals_29, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_30, (256,), (1,)) assert_size_stride(primals_31, (256,), (1,)) assert_size_stride(primals_32, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_33, (1024,), (1,)) assert_size_stride(primals_34, (1024,), (1,)) 
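    # NOTE (reviewer-added): the long run of assert_size_stride(primals_*)
    # calls pins down the exact parameter layout this graph was traced with:
    # a 7x7 stem conv weight (256, 3, 7, 7), then repeated 1x1/3x3/1x1
    # bottleneck conv weights interleaved with per-channel gain/bias vectors.
    # Every conv weight is standardized on the fly by the
    # triton_{per,red}_fused_add_div_sqrt_sub_var_mean_* kernels above before
    # being handed to extern_kernels.convolution. A minimal eager-mode sketch
    # of what those kernels compute, assuming `w` is any conv weight here and
    # using the eps=1e-05 and population variance visible in the kernels
    # (reduction over all non-output dims, rnumel = C_in * kH * kW):
    #
    #     m = w.mean(dim=(1, 2, 3), keepdim=True)
    #     v = w.var(dim=(1, 2, 3), keepdim=True, unbiased=False)
    #     w_std = (w - m) / torch.sqrt(v + 1e-05)  # == (w - tmp8) / tmp18
    #
    # i.e. Weight Standardization, as typically paired with GroupNorm.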
assert_size_stride(primals_35, (256, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_36, (256,), (1,)) assert_size_stride(primals_37, (256,), (1,)) assert_size_stride(primals_38, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_39, (256,), (1,)) assert_size_stride(primals_40, (256,), (1,)) assert_size_stride(primals_41, (1024, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_42, (1024,), (1,)) assert_size_stride(primals_43, (1024,), (1,)) assert_size_stride(primals_44, (2048, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_45, (2048,), (1,)) assert_size_stride(primals_46, (2048,), (1,)) assert_size_stride(primals_47, (512, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_48, (512,), (1,)) assert_size_stride(primals_49, (512,), (1,)) assert_size_stride(primals_50, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_51, (512,), (1,)) assert_size_stride(primals_52, (512,), (1,)) assert_size_stride(primals_53, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_54, (2048,), (1,)) assert_size_stride(primals_55, (2048,), (1,)) assert_size_stride(primals_56, (512, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_57, (512,), (1,)) assert_size_stride(primals_58, (512,), (1,)) assert_size_stride(primals_59, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_60, (512,), (1,)) assert_size_stride(primals_61, (512,), (1,)) assert_size_stride(primals_62, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_63, (2048,), (1,)) assert_size_stride(primals_64, (2048,), (1,)) assert_size_stride(primals_65, (512, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_66, (512,), (1,)) assert_size_stride(primals_67, (512,), (1,)) assert_size_stride(primals_68, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_69, (512,), (1,)) assert_size_stride(primals_70, (512,), (1,)) assert_size_stride(primals_71, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_72, (2048,), (1,)) assert_size_stride(primals_73, (2048,), (1,)) assert_size_stride(primals_74, (512, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_75, (512,), (1,)) assert_size_stride(primals_76, (512,), (1,)) assert_size_stride(primals_77, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_78, (512,), (1,)) assert_size_stride(primals_79, (512,), (1,)) assert_size_stride(primals_80, (2048, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_81, (2048,), (1,)) assert_size_stride(primals_82, (2048,), (1,)) assert_size_stride(primals_83, (4096, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_84, (4096,), (1,)) assert_size_stride(primals_85, (4096,), (1,)) assert_size_stride(primals_86, (1024, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_87, (1024,), (1,)) assert_size_stride(primals_88, (1024,), (1,)) assert_size_stride(primals_89, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_90, (1024,), (1,)) assert_size_stride(primals_91, (1024,), (1,)) assert_size_stride(primals_92, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_93, (4096,), (1,)) assert_size_stride(primals_94, (4096,), (1,)) assert_size_stride(primals_95, (1024, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_96, (1024,), (1,)) assert_size_stride(primals_97, (1024,), (1,)) assert_size_stride(primals_98, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_99, (1024,), (1,)) assert_size_stride(primals_100, (1024,), (1,)) assert_size_stride(primals_101, (4096, 1024, 1, 1), (1024, 1, 1, 1)) 
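    # NOTE (reviewer-added): the GroupNorm side of the graph
    # (triton_{red,per}_fused_native_group_norm_* plus the
    # *_native_group_norm_relu_* kernels) computes per-group mean/variance
    # with a Welford reduction, then fuses the scale/shift, the optional
    # residual add, and the ReLU into a single elementwise pass; the
    # main-path activations use 32 groups with eps=1e-06 (the projection
    # shortcut branch in kernels 16/29/42 uses eps=1e-05). A sketch of what
    # one fused bottleneck tail evaluates, with hypothetical names: `x` is
    # the block output, `skip` the residual input, `gamma`/`beta` the
    # per-channel primals:
    #
    #     import torch.nn.functional as F
    #     y = F.group_norm(x, num_groups=32, weight=gamma, bias=beta,
    #                      eps=1e-06)
    #     out = F.relu(skip + y)
    #
    # which matches the maximum(0, skip_branch + gn_branch) tail of
    # triton_poi_fused_add_native_group_norm_relu_{16,18,29,31,42,44}.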
assert_size_stride(primals_102, (4096,), (1,)) assert_size_stride(primals_103, (4096,), (1,)) assert_size_stride(primals_104, (1024, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_105, (1024,), (1,)) assert_size_stride(primals_106, (1024,), (1,)) assert_size_stride(primals_107, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_108, (1024,), (1,)) assert_size_stride(primals_109, (1024,), (1,)) assert_size_stride(primals_110, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_111, (4096,), (1,)) assert_size_stride(primals_112, (4096,), (1,)) assert_size_stride(primals_113, (1024, 4096, 1, 1), (4096, 1, 1, 1)) assert_size_stride(primals_114, (1024,), (1,)) assert_size_stride(primals_115, (1024,), (1,)) assert_size_stride(primals_116, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_117, (1024,), (1,)) assert_size_stride(primals_118, (1024,), (1,)) assert_size_stride(primals_119, (4096, 1024, 1, 1), (1024, 1, 1, 1)) assert_size_stride(primals_120, (4096,), (1,)) assert_size_stride(primals_121, (4096,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch. float32) get_raw_stream(0) triton_poi_fused_0[grid(768, 49)](primals_1, buf0, 768, 49, XBLOCK= 32, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 4096)](primals_2, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_2[grid(65536, 9)](primals_11, buf2, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_11 buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_2[grid(65536, 9)](primals_20, buf3, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_20 buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_2[grid(65536, 9)](primals_29, buf4, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_29 buf5 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_2[grid(65536, 9)](primals_38, buf5, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_38 buf6 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_3[grid(262144, 9)](primals_50, buf6, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_50 buf7 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_3[grid(262144, 9)](primals_59, buf7, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_59 buf8 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_3[grid(262144, 9)](primals_68, buf8, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_68 buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_poi_fused_3[grid(262144, 9)](primals_77, buf9, 262144, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_77 buf10 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024 ), torch.float32) triton_poi_fused_4[grid(1048576, 9)](primals_89, buf10, 1048576, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_89 buf11 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024 ), torch.float32) triton_poi_fused_4[grid(1048576, 
9)](primals_98, buf11, 1048576, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_98 buf12 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024 ), torch.float32) triton_poi_fused_4[grid(1048576, 9)](primals_107, buf12, 1048576, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_107 buf13 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024 ), torch.float32) triton_poi_fused_4[grid(1048576, 9)](primals_116, buf13, 1048576, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_116 buf15 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf17 = reinterpret_tensor(buf15, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf15 buf18 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch. float32) triton_per_fused_add_div_sqrt_sub_var_mean_5[grid(256)](buf17, buf0, buf18, 256, 147, XBLOCK=1, num_warps=2, num_stages=1) buf19 = extern_kernels.convolution(buf1, buf18, stride=(2, 2), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf19, (4, 256, 32, 32), (262144, 1, 8192, 256)) buf20 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf21 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf23 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) triton_red_fused_native_group_norm_6[grid(128)](buf19, buf20, buf21, buf23, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1 ) buf24 = empty_strided_cuda((4, 256, 32, 32), (262144, 1, 8192, 256), torch.float32) triton_poi_fused_native_group_norm_relu_7[grid(1048576)](buf19, buf20, buf21, primals_3, primals_4, buf24, 1048576, XBLOCK=512, num_warps=8, num_stages=1) del primals_4 buf25 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) buf26 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.int8) triton_poi_fused_max_pool2d_with_indices_8[grid(230400)](buf24, buf25, buf26, 230400, XBLOCK=512, num_warps=8, num_stages=1) buf28 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf30 = reinterpret_tensor(buf28, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf28 buf31 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf30, primals_5, buf31, 1024, 256, num_warps=2, num_stages=1) buf32 = extern_kernels.convolution(buf25, buf31, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 1024, 15, 15), (230400, 1, 15360, 1024)) buf33 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096), torch.float32) buf34 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096), torch.float32) buf36 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096), torch.float32) triton_per_fused_native_group_norm_10[grid(4096)](buf32, buf33, buf34, buf36, 4096, 225, XBLOCK=1, num_warps=2, num_stages=1) buf38 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf40 = reinterpret_tensor(buf38, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf38 buf41 = empty_strided_cuda((256, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(256)](buf40, primals_8, buf41, 256, 256, num_warps=2, num_stages=1) buf42 = extern_kernels.convolution(buf25, buf41, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 256, 15, 15), 
(57600, 1, 3840, 256)) buf43 = buf21 del buf21 buf44 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf46 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) triton_red_fused_native_group_norm_12[grid(128)](buf42, buf43, buf44, buf46, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf47 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf42, buf43, buf44, primals_9, primals_10, buf47, 230400, XBLOCK=1024, num_warps=4, num_stages=1) del primals_10 buf49 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf51 = reinterpret_tensor(buf49, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf49 buf52 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_14[grid(256)](buf51, buf2, buf52, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf53 = extern_kernels.convolution(buf47, buf52, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf53, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf54 = buf44 del buf44 buf55 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf57 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) triton_red_fused_native_group_norm_12[grid(128)](buf53, buf54, buf55, buf57, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf58 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf53, buf54, buf55, primals_12, primals_13, buf58, 230400, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_13 buf60 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf62 = reinterpret_tensor(buf60, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf60 buf63 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf62, primals_14, buf63, 1024, 256, num_warps=2, num_stages=1) buf64 = extern_kernels.convolution(buf58, buf63, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf64, (4, 1024, 15, 15), (230400, 1, 15360, 1024)) buf65 = buf55 del buf55 buf66 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf68 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. 
float32) triton_red_fused_native_group_norm_15[grid(128)](buf64, buf65, buf66, buf68, 128, 7200, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf69 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360, 1024), torch.float32) buf70 = buf69 del buf69 triton_poi_fused_add_native_group_norm_relu_16[grid(921600)](buf70, buf32, buf33, buf34, primals_6, primals_7, buf64, buf65, buf66, primals_15, primals_16, 921600, XBLOCK=512, num_warps=8, num_stages=1) del primals_16 del primals_7 buf72 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf74 = reinterpret_tensor(buf72, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf72 buf75 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf74, primals_17, buf75, 256, 1024, num_warps=8, num_stages=1) buf76 = extern_kernels.convolution(buf70, buf75, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf76, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf77 = buf66 del buf66 buf78 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf80 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) triton_red_fused_native_group_norm_12[grid(128)](buf76, buf77, buf78, buf80, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf81 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf76, buf77, buf78, primals_18, primals_19, buf81, 230400, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_19 buf83 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf85 = reinterpret_tensor(buf83, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf83 buf86 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_14[grid(256)](buf85, buf3, buf86, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf87 = extern_kernels.convolution(buf81, buf86, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf87, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf88 = buf78 del buf78 buf89 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. float32) buf91 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch. 
float32) triton_red_fused_native_group_norm_12[grid(128)](buf87, buf88, buf89, buf91, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf92 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf87, buf88, buf89, primals_21, primals_22, buf92, 230400, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_22 buf94 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf96 = reinterpret_tensor(buf94, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf94 buf97 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf96, primals_23, buf97, 1024, 256, num_warps=2, num_stages=1) buf98 = extern_kernels.convolution(buf92, buf97, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf98, (4, 1024, 15, 15), (230400, 1, 15360, 1024)) buf99 = buf89 del buf89 buf100 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf102 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_15[grid(128)](buf98, buf99, buf100, buf102, 128, 7200, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf103 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360, 1024), torch.float32) triton_poi_fused_add_native_group_norm_relu_18[grid(921600)](buf70, buf98, buf99, buf100, primals_24, primals_25, buf103, 921600, XBLOCK=512, num_warps=8, num_stages=1) del primals_25 buf105 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf107 = reinterpret_tensor(buf105, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf105 buf108 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024 ), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf107, primals_26, buf108, 256, 1024, num_warps=8, num_stages=1) buf109 = extern_kernels.convolution(buf103, buf108, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf109, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf110 = buf100 del buf100 buf111 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf113 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_12[grid(128)](buf109, buf110, buf111, buf113, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf114 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf109, buf110, buf111, primals_27, primals_28, buf114, 230400, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_28 buf116 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf118 = reinterpret_tensor(buf116, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf116 buf119 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_14[grid(256)](buf118, buf4, buf119, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf120 = extern_kernels.convolution(buf114, buf119, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf120, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf121 = buf111 del buf111 buf122 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf124 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), 
torch .float32) triton_red_fused_native_group_norm_12[grid(128)](buf120, buf121, buf122, buf124, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf125 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf120, buf121, buf122, primals_30, primals_31, buf125, 230400, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_31 buf127 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf129 = reinterpret_tensor(buf127, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf127 buf130 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf129, primals_32, buf130, 1024, 256, num_warps=2, num_stages=1) buf131 = extern_kernels.convolution(buf125, buf130, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf131, (4, 1024, 15, 15), (230400, 1, 15360, 1024)) buf132 = buf122 del buf122 buf133 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf135 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_15[grid(128)](buf131, buf132, buf133, buf135, 128, 7200, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf136 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360, 1024), torch.float32) triton_poi_fused_add_native_group_norm_relu_18[grid(921600)](buf103, buf131, buf132, buf133, primals_33, primals_34, buf136, 921600, XBLOCK=512, num_warps=8, num_stages=1) del primals_34 buf138 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf140 = reinterpret_tensor(buf138, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf138 buf141 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024 ), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf140, primals_35, buf141, 256, 1024, num_warps=8, num_stages=1) buf142 = extern_kernels.convolution(buf136, buf141, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf142, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf143 = buf133 del buf133 buf144 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf146 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_12[grid(128)](buf142, buf143, buf144, buf146, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf147 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf142, buf143, buf144, primals_36, primals_37, buf147, 230400, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_37 buf149 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32) buf151 = reinterpret_tensor(buf149, (256, 1, 1, 1), (1, 1, 1, 1), 0) del buf149 buf152 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_14[grid(256)](buf151, buf5, buf152, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf153 = extern_kernels.convolution(buf147, buf152, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf153, (4, 256, 15, 15), (57600, 1, 3840, 256)) buf154 = buf144 del buf144 buf155 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf157 = 
empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_12[grid(128)](buf153, buf154, buf155, buf157, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf158 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32) triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf153, buf154, buf155, primals_39, primals_40, buf158, 230400, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_40 buf160 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf162 = reinterpret_tensor(buf160, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf160 buf163 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf162, primals_41, buf163, 1024, 256, num_warps=2, num_stages=1) buf164 = extern_kernels.convolution(buf158, buf163, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf164, (4, 1024, 15, 15), (230400, 1, 15360, 1024)) buf165 = buf155 del buf155 buf166 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf168 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_15[grid(128)](buf164, buf165, buf166, buf168, 128, 7200, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf169 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360, 1024), torch.float32) triton_poi_fused_add_native_group_norm_relu_18[grid(921600)](buf136, buf164, buf165, buf166, primals_42, primals_43, buf169, 921600, XBLOCK=512, num_warps=8, num_stages=1) del primals_43 buf171 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf173 = reinterpret_tensor(buf171, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf171 buf174 = empty_strided_cuda((2048, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_19[grid(2048)](buf173, primals_44, buf174, 2048, 1024, num_warps=8, num_stages=1) buf175 = extern_kernels.convolution(buf169, buf174, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf175, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf176 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192), torch.float32) buf177 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192), torch.float32) buf179 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192), torch.float32) triton_per_fused_native_group_norm_20[grid(8192)](buf175, buf176, buf177, buf179, 8192, 64, XBLOCK=8, num_warps=4, num_stages=1) buf181 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf183 = reinterpret_tensor(buf181, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf181 buf184 = empty_strided_cuda((512, 1024, 1, 1), (1024, 1, 1024, 1024 ), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_21[grid(512)](buf183, primals_47, buf184, 512, 1024, num_warps=8, num_stages=1) buf185 = extern_kernels.convolution(buf169, buf184, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf185, (4, 512, 15, 15), (115200, 1, 7680, 512)) buf186 = buf166 del buf166 buf187 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf189 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_22[grid(128)](buf185, buf186, buf187, buf189, 128, 3600, XBLOCK=1, 
RBLOCK=2048, num_warps=16, num_stages=1) buf190 = empty_strided_cuda((4, 512, 15, 15), (115200, 1, 7680, 512 ), torch.float32) triton_poi_fused_native_group_norm_relu_23[grid(460800)](buf185, buf186, buf187, primals_48, primals_49, buf190, 460800, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_49 buf192 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf194 = reinterpret_tensor(buf192, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf192 buf195 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_24[grid(512)](buf194, buf6, buf195, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf196 = extern_kernels.convolution(buf190, buf195, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf196, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf197 = buf187 del buf187 buf198 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf200 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_25[grid(128)](buf196, buf197, buf198, buf200, 128, 1024, num_warps=8, num_stages=1) buf201 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf196, buf197, buf198, primals_51, primals_52, buf201, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_52 buf203 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf205 = reinterpret_tensor(buf203, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf203 buf206 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_27[grid(2048)](buf205, primals_53, buf206, 2048, 512, num_warps=4, num_stages=1) buf207 = extern_kernels.convolution(buf201, buf206, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf207, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf208 = buf198 del buf198 buf209 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf211 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_28[grid(128)](buf207, buf208, buf209, buf211, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf212 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) buf213 = buf212 del buf212 triton_poi_fused_add_native_group_norm_relu_29[grid(524288)](buf213, buf175, buf176, buf177, primals_45, primals_46, buf207, buf208, buf209, primals_54, primals_55, 524288, XBLOCK=512, num_warps=8, num_stages=1) del buf177 del primals_46 del primals_55 buf215 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf217 = reinterpret_tensor(buf215, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf215 buf218 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048 ), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf217, primals_56, buf218, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps =16, num_stages=1) buf219 = extern_kernels.convolution(buf213, buf218, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf219, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf220 = buf209 del buf209 buf221 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf223 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 
128), torch .float32) triton_per_fused_native_group_norm_25[grid(128)](buf219, buf220, buf221, buf223, 128, 1024, num_warps=8, num_stages=1) buf224 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf219, buf220, buf221, primals_57, primals_58, buf224, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_58 buf226 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf228 = reinterpret_tensor(buf226, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf226 buf229 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_24[grid(512)](buf228, buf7, buf229, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf230 = extern_kernels.convolution(buf224, buf229, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf230, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf231 = buf221 del buf221 buf232 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf234 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_25[grid(128)](buf230, buf231, buf232, buf234, 128, 1024, num_warps=8, num_stages=1) buf235 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf230, buf231, buf232, primals_60, primals_61, buf235, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_61 buf237 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf239 = reinterpret_tensor(buf237, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf237 buf240 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_27[grid(2048)](buf239, primals_62, buf240, 2048, 512, num_warps=4, num_stages=1) buf241 = extern_kernels.convolution(buf235, buf240, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf241, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf242 = buf232 del buf232 buf243 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf245 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_28[grid(128)](buf241, buf242, buf243, buf245, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf246 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) triton_poi_fused_add_native_group_norm_relu_31[grid(524288)](buf213, buf241, buf242, buf243, primals_63, primals_64, buf246, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_64 buf248 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf250 = reinterpret_tensor(buf248, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf248 buf251 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048 ), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf250, primals_65, buf251, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps =16, num_stages=1) buf252 = extern_kernels.convolution(buf246, buf251, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf252, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf253 = buf243 del buf243 buf254 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf256 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 
128), torch .float32) triton_per_fused_native_group_norm_25[grid(128)](buf252, buf253, buf254, buf256, 128, 1024, num_warps=8, num_stages=1) buf257 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf252, buf253, buf254, primals_66, primals_67, buf257, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_67 buf259 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf261 = reinterpret_tensor(buf259, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf259 buf262 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_24[grid(512)](buf261, buf8, buf262, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf263 = extern_kernels.convolution(buf257, buf262, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf263, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf264 = buf254 del buf254 buf265 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf267 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_25[grid(128)](buf263, buf264, buf265, buf267, 128, 1024, num_warps=8, num_stages=1) buf268 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf263, buf264, buf265, primals_69, primals_70, buf268, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_70 buf270 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf272 = reinterpret_tensor(buf270, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf270 buf273 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_27[grid(2048)](buf272, primals_71, buf273, 2048, 512, num_warps=4, num_stages=1) buf274 = extern_kernels.convolution(buf268, buf273, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf274, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf275 = buf265 del buf265 buf276 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf278 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_28[grid(128)](buf274, buf275, buf276, buf278, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf279 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) triton_poi_fused_add_native_group_norm_relu_31[grid(524288)](buf246, buf274, buf275, buf276, primals_72, primals_73, buf279, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_73 buf281 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf283 = reinterpret_tensor(buf281, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf281 buf284 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048 ), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf283, primals_74, buf284, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps =16, num_stages=1) buf285 = extern_kernels.convolution(buf279, buf284, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf285, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf286 = buf276 del buf276 buf287 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf289 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 
128), torch .float32) triton_per_fused_native_group_norm_25[grid(128)](buf285, buf286, buf287, buf289, 128, 1024, num_warps=8, num_stages=1) buf290 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf285, buf286, buf287, primals_75, primals_76, buf290, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_76 buf292 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32) buf294 = reinterpret_tensor(buf292, (512, 1, 1, 1), (1, 1, 1, 1), 0) del buf292 buf295 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_24[grid(512)](buf294, buf9, buf295, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf296 = extern_kernels.convolution(buf290, buf295, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf296, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf297 = buf287 del buf287 buf298 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf300 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_25[grid(128)](buf296, buf297, buf298, buf300, 128, 1024, num_warps=8, num_stages=1) buf301 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf296, buf297, buf298, primals_78, primals_79, buf301, 131072, XBLOCK= 512, num_warps=8, num_stages=1) del primals_79 buf303 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32) buf305 = reinterpret_tensor(buf303, (2048, 1, 1, 1), (1, 1, 1, 1), 0) del buf303 buf306 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_27[grid(2048)](buf305, primals_80, buf306, 2048, 512, num_warps=4, num_stages=1) buf307 = extern_kernels.convolution(buf301, buf306, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf307, (4, 2048, 8, 8), (131072, 1, 16384, 2048)) buf308 = buf298 del buf298 buf309 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf311 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_28[grid(128)](buf307, buf308, buf309, buf311, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf312 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32) triton_poi_fused_add_native_group_norm_relu_31[grid(524288)](buf279, buf307, buf308, buf309, primals_81, primals_82, buf312, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_82 buf314 = reinterpret_tensor(buf34, (4096, 1, 1, 1), (1, 4096, 4096, 4096), 0) del buf34 buf316 = reinterpret_tensor(buf314, (4096, 1, 1, 1), (1, 1, 1, 1), 0) del buf314 buf317 = empty_strided_cuda((4096, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_32[grid(4096)](buf316, primals_83, buf317, 4096, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf318 = extern_kernels.convolution(buf312, buf317, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf318, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf319 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 16384 ), torch.float32) buf320 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 
16384 ), torch.float32) buf322 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 16384 ), torch.float32) triton_per_fused_native_group_norm_33[grid(16384)](buf318, buf319, buf320, buf322, 16384, 16, XBLOCK=32, num_warps=4, num_stages=1) buf324 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf326 = reinterpret_tensor(buf324, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf324 buf327 = empty_strided_cuda((1024, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_34[grid(1024)](buf326, primals_86, buf327, 1024, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf328 = extern_kernels.convolution(buf312, buf327, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf328, (4, 1024, 8, 8), (65536, 1, 8192, 1024)) buf329 = buf309 del buf309 buf330 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf332 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_35[grid(128)](buf328, buf329, buf330, buf332, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf333 = empty_strided_cuda((4, 1024, 8, 8), (65536, 1, 8192, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_36[grid(262144)](buf328, buf329, buf330, primals_87, primals_88, buf333, 262144, XBLOCK= 1024, num_warps=4, num_stages=1) del primals_88 buf335 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf337 = reinterpret_tensor(buf335, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf335 buf338 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_37[grid(1024)](buf337, buf10, buf338, 1024, 9216, XBLOCK=1, RBLOCK=1024, num_warps=16, num_stages=1) buf339 = extern_kernels.convolution(buf333, buf338, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf339, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf340 = buf330 del buf330 buf341 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf343 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_38[grid(128)](buf339, buf340, buf341, buf343, 128, 512, num_warps=4, num_stages=1) buf344 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf339, buf340, buf341, primals_90, primals_91, buf344, 65536, XBLOCK= 512, num_warps=4, num_stages=1) del primals_91 buf346 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf348 = reinterpret_tensor(buf346, (4096, 1, 1, 1), (1, 1, 1, 1), 0) del buf346 buf349 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_40[grid(4096)](buf348, primals_92, buf349, 4096, 1024, num_warps=8, num_stages=1) buf350 = extern_kernels.convolution(buf344, buf349, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf350, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf351 = buf341 del buf341 buf352 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf354 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_41[grid(128)](buf350, buf351, buf352, buf354, 128, 2048, XBLOCK=1, RBLOCK=2048, 
num_warps=16, num_stages=1) buf355 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096 ), torch.float32) buf356 = buf355 del buf355 triton_poi_fused_add_native_group_norm_relu_42[grid(262144)](buf356, buf318, buf319, buf320, primals_84, primals_85, buf350, buf351, buf352, primals_93, primals_94, 262144, XBLOCK=512, num_warps=8, num_stages=1) del buf320 del primals_85 del primals_94 buf358 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf360 = reinterpret_tensor(buf358, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf358 buf361 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf360, primals_95, buf361, 1024, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf362 = extern_kernels.convolution(buf356, buf361, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf362, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf363 = buf352 del buf352 buf364 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf366 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_38[grid(128)](buf362, buf363, buf364, buf366, 128, 512, num_warps=4, num_stages=1) buf367 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf362, buf363, buf364, primals_96, primals_97, buf367, 65536, XBLOCK= 512, num_warps=4, num_stages=1) del primals_97 buf369 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf371 = reinterpret_tensor(buf369, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf369 buf372 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_37[grid(1024)](buf371, buf11, buf372, 1024, 9216, XBLOCK=1, RBLOCK=1024, num_warps=16, num_stages=1) buf373 = extern_kernels.convolution(buf367, buf372, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf373, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf374 = buf364 del buf364 buf375 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf377 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_38[grid(128)](buf373, buf374, buf375, buf377, 128, 512, num_warps=4, num_stages=1) buf378 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf373, buf374, buf375, primals_99, primals_100, buf378, 65536, XBLOCK= 512, num_warps=4, num_stages=1) del primals_100 buf380 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf382 = reinterpret_tensor(buf380, (4096, 1, 1, 1), (1, 1, 1, 1), 0) del buf380 buf383 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_40[grid(4096)](buf382, primals_101, buf383, 4096, 1024, num_warps=8, num_stages=1) buf384 = extern_kernels.convolution(buf378, buf383, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf384, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf385 = buf375 del buf375 buf386 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf388 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch 
.float32) triton_red_fused_native_group_norm_41[grid(128)](buf384, buf385, buf386, buf388, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf389 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096 ), torch.float32) triton_poi_fused_add_native_group_norm_relu_44[grid(262144)](buf356, buf384, buf385, buf386, primals_102, primals_103, buf389, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_103 buf391 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf393 = reinterpret_tensor(buf391, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf391 buf394 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf393, primals_104, buf394, 1024, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf395 = extern_kernels.convolution(buf389, buf394, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf395, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf396 = buf386 del buf386 buf397 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf399 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_38[grid(128)](buf395, buf396, buf397, buf399, 128, 512, num_warps=4, num_stages=1) buf400 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf395, buf396, buf397, primals_105, primals_106, buf400, 65536, XBLOCK =512, num_warps=4, num_stages=1) del primals_106 buf402 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf404 = reinterpret_tensor(buf402, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf402 buf405 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_37[grid(1024)](buf404, buf12, buf405, 1024, 9216, XBLOCK=1, RBLOCK=1024, num_warps=16, num_stages=1) buf406 = extern_kernels.convolution(buf400, buf405, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf406, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf407 = buf397 del buf397 buf408 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf410 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_38[grid(128)](buf406, buf407, buf408, buf410, 128, 512, num_warps=4, num_stages=1) buf411 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf406, buf407, buf408, primals_108, primals_109, buf411, 65536, XBLOCK =512, num_warps=4, num_stages=1) del primals_109 buf413 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf415 = reinterpret_tensor(buf413, (4096, 1, 1, 1), (1, 1, 1, 1), 0) del buf413 buf416 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_40[grid(4096)](buf415, primals_110, buf416, 4096, 1024, num_warps=8, num_stages=1) buf417 = extern_kernels.convolution(buf411, buf416, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf417, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf418 = buf408 del buf408 buf419 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf421 = empty_strided_cuda((4, 32, 
1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_41[grid(128)](buf417, buf418, buf419, buf421, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf422 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096 ), torch.float32) triton_poi_fused_add_native_group_norm_relu_44[grid(262144)](buf389, buf417, buf418, buf419, primals_111, primals_112, buf422, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_112 buf424 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf426 = reinterpret_tensor(buf424, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf424 buf427 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf426, primals_113, buf427, 1024, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf428 = extern_kernels.convolution(buf422, buf427, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf428, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf429 = buf419 del buf419 buf430 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf432 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_38[grid(128)](buf428, buf429, buf430, buf432, 128, 512, num_warps=4, num_stages=1) buf433 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf428, buf429, buf430, primals_114, primals_115, buf433, 65536, XBLOCK =512, num_warps=4, num_stages=1) del primals_115 buf435 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32) buf437 = reinterpret_tensor(buf435, (1024, 1, 1, 1), (1, 1, 1, 1), 0) del buf435 buf438 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32) triton_red_fused_add_div_sqrt_sub_var_mean_37[grid(1024)](buf437, buf13, buf438, 1024, 9216, XBLOCK=1, RBLOCK=1024, num_warps=16, num_stages=1) buf439 = extern_kernels.convolution(buf433, buf438, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf439, (4, 1024, 4, 4), (16384, 1, 4096, 1024)) buf440 = buf430 del buf430 buf441 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) buf443 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_per_fused_native_group_norm_38[grid(128)](buf439, buf440, buf441, buf443, 128, 512, num_warps=4, num_stages=1) buf444 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32) triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf439, buf440, buf441, primals_117, primals_118, buf444, 65536, XBLOCK =512, num_warps=4, num_stages=1) del primals_118 buf446 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32) buf448 = reinterpret_tensor(buf446, (4096, 1, 1, 1), (1, 1, 1, 1), 0) del buf446 buf449 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32) triton_per_fused_add_div_sqrt_sub_var_mean_40[grid(4096)](buf448, primals_119, buf449, 4096, 1024, num_warps=8, num_stages=1) buf450 = extern_kernels.convolution(buf444, buf449, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf450, (4, 4096, 4, 4), (65536, 1, 16384, 4096)) buf451 = buf441 del buf441 buf452 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) 
buf454 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch .float32) triton_red_fused_native_group_norm_41[grid(128)](buf450, buf451, buf452, buf454, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) buf455 = empty_strided_cuda((4, 4096, 4, 4), (65536, 16, 4, 1), torch.float32) triton_poi_fused_add_native_group_norm_relu_45[grid(64, 4096)](buf422, buf450, buf451, buf452, primals_120, primals_121, buf455, 64, 4096, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del buf452 del primals_121 buf456 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096 ), torch.bool) triton_poi_fused_threshold_backward_46[grid(16384, 16)](buf455, buf456, 16384, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) return (buf455, buf0, buf1, primals_3, primals_5, primals_6, primals_8, primals_9, buf2, primals_12, primals_14, primals_15, primals_17, primals_18, buf3, primals_21, primals_23, primals_24, primals_26, primals_27, buf4, primals_30, primals_32, primals_33, primals_35, primals_36, buf5, primals_39, primals_41, primals_42, primals_44, primals_45, primals_47, primals_48, buf6, primals_51, primals_53, primals_54, primals_56, primals_57, buf7, primals_60, primals_62, primals_63, primals_65, primals_66, buf8, primals_69, primals_71, primals_72, primals_74, primals_75, buf9, primals_78, primals_80, primals_81, primals_83, primals_84, primals_86, primals_87, buf10, primals_90, primals_92, primals_93, primals_95, primals_96, buf11, primals_99, primals_101, primals_102, primals_104, primals_105, buf12, primals_108, primals_110, primals_111, primals_113, primals_114, buf13, primals_117, primals_119, primals_120, buf17, buf18, buf19, reinterpret_tensor(buf20, (4, 32), (32, 1), 0), reinterpret_tensor(buf23, (4, 32), (32, 1), 0), buf24, buf25, buf26, buf30, buf31, buf32, reinterpret_tensor(buf33, (4, 1024), (1024, 1), 0), reinterpret_tensor(buf36, (4, 1024), (1024, 1), 0), buf40, buf41, buf42, reinterpret_tensor(buf43, (4, 32), (32, 1), 0), reinterpret_tensor(buf46, (4, 32), (32, 1), 0), buf47, buf51, buf52, buf53, reinterpret_tensor(buf54, (4, 32), (32, 1), 0), reinterpret_tensor(buf57, (4, 32), (32, 1), 0), buf58, buf62, buf63, buf64, reinterpret_tensor(buf65, (4, 32), (32, 1), 0), reinterpret_tensor(buf68, (4, 32), (32, 1), 0), buf70, buf74, buf75, buf76, reinterpret_tensor(buf77, (4, 32), (32, 1), 0), reinterpret_tensor(buf80, (4, 32), (32, 1), 0), buf81, buf85, buf86, buf87, reinterpret_tensor(buf88, (4, 32), (32, 1), 0), reinterpret_tensor(buf91, (4, 32), (32, 1), 0), buf92, buf96, buf97, buf98, reinterpret_tensor(buf99, (4, 32), (32, 1), 0), reinterpret_tensor(buf102, (4, 32), (32, 1), 0), buf103, buf107, buf108, buf109, reinterpret_tensor(buf110, (4, 32), (32, 1), 0), reinterpret_tensor(buf113, (4, 32), (32, 1), 0), buf114, buf118, buf119, buf120, reinterpret_tensor(buf121, (4, 32), (32, 1), 0), reinterpret_tensor(buf124, (4, 32), (32, 1), 0), buf125, buf129, buf130, buf131, reinterpret_tensor(buf132, (4, 32), (32, 1), 0), reinterpret_tensor(buf135, (4, 32), (32, 1), 0), buf136, buf140, buf141, buf142, reinterpret_tensor(buf143, (4, 32), (32, 1), 0), reinterpret_tensor(buf146, (4, 32), (32, 1), 0), buf147, buf151, buf152, buf153, reinterpret_tensor(buf154, (4, 32), (32, 1), 0), reinterpret_tensor(buf157, (4, 32), (32, 1), 0), buf158, buf162, buf163, buf164, reinterpret_tensor(buf165, (4, 32), (32, 1), 0), reinterpret_tensor(buf168, (4, 32), (32, 1), 0), buf169, buf173, buf174, buf175, reinterpret_tensor(buf176, (4, 2048), (2048, 1), 0), reinterpret_tensor(buf179, (4, 2048), 
(2048, 1), 0), buf183, buf184, buf185, reinterpret_tensor(buf186, (4, 32), (32, 1), 0), reinterpret_tensor(buf189, (4, 32), (32, 1), 0), buf190, buf194, buf195, buf196, reinterpret_tensor(buf197, (4, 32), (32, 1), 0), reinterpret_tensor(buf200, (4, 32), (32, 1), 0), buf201, buf205, buf206, buf207, reinterpret_tensor(buf208, (4, 32), (32, 1), 0), reinterpret_tensor(buf211, (4, 32), (32, 1), 0), buf213, buf217, buf218, buf219, reinterpret_tensor(buf220, (4, 32), (32, 1), 0), reinterpret_tensor(buf223, (4, 32), (32, 1), 0), buf224, buf228, buf229, buf230, reinterpret_tensor(buf231, (4, 32), (32, 1), 0), reinterpret_tensor(buf234, (4, 32), (32, 1), 0), buf235, buf239, buf240, buf241, reinterpret_tensor(buf242, (4, 32), (32, 1), 0), reinterpret_tensor(buf245, (4, 32), (32, 1), 0), buf246, buf250, buf251, buf252, reinterpret_tensor(buf253, (4, 32), (32, 1), 0), reinterpret_tensor(buf256, (4, 32), (32, 1), 0), buf257, buf261, buf262, buf263, reinterpret_tensor(buf264, (4, 32), (32, 1), 0), reinterpret_tensor(buf267, (4, 32), (32, 1), 0), buf268, buf272, buf273, buf274, reinterpret_tensor(buf275, (4, 32), (32, 1), 0), reinterpret_tensor(buf278, (4, 32), (32, 1), 0), buf279, buf283, buf284, buf285, reinterpret_tensor(buf286, (4, 32), (32, 1), 0), reinterpret_tensor(buf289, (4, 32), (32, 1), 0), buf290, buf294, buf295, buf296, reinterpret_tensor(buf297, (4, 32), (32, 1), 0), reinterpret_tensor(buf300, (4, 32), (32, 1), 0), buf301, buf305, buf306, buf307, reinterpret_tensor(buf308, (4, 32), (32, 1), 0), reinterpret_tensor(buf311, (4, 32), (32, 1), 0), buf312, buf316, buf317, buf318, reinterpret_tensor(buf319, (4, 4096), (4096, 1), 0), reinterpret_tensor(buf322, (4, 4096), (4096, 1), 0), buf326, buf327, buf328, reinterpret_tensor(buf329, (4, 32), (32, 1), 0), reinterpret_tensor(buf332, (4, 32), (32, 1), 0), buf333, buf337, buf338, buf339, reinterpret_tensor(buf340, (4, 32), (32, 1), 0), reinterpret_tensor(buf343, (4, 32), (32, 1), 0), buf344, buf348, buf349, buf350, reinterpret_tensor(buf351, (4, 32), (32, 1), 0), reinterpret_tensor(buf354, (4, 32), (32, 1), 0), buf356, buf360, buf361, buf362, reinterpret_tensor(buf363, (4, 32), (32, 1), 0), reinterpret_tensor(buf366, (4, 32), (32, 1), 0), buf367, buf371, buf372, buf373, reinterpret_tensor(buf374, (4, 32), (32, 1), 0), reinterpret_tensor(buf377, (4, 32), (32, 1), 0), buf378, buf382, buf383, buf384, reinterpret_tensor(buf385, (4, 32), (32, 1), 0), reinterpret_tensor(buf388, (4, 32), (32, 1), 0), buf389, buf393, buf394, buf395, reinterpret_tensor(buf396, (4, 32), (32, 1), 0), reinterpret_tensor(buf399, (4, 32), (32, 1), 0), buf400, buf404, buf405, buf406, reinterpret_tensor(buf407, (4, 32), (32, 1), 0), reinterpret_tensor(buf410, (4, 32), (32, 1), 0), buf411, buf415, buf416, buf417, reinterpret_tensor(buf418, (4, 32), (32, 1), 0), reinterpret_tensor(buf421, (4, 32), (32, 1), 0), buf422, buf426, buf427, buf428, reinterpret_tensor(buf429, (4, 32), (32, 1), 0), reinterpret_tensor(buf432, (4, 32), (32, 1), 0), buf433, buf437, buf438, buf439, reinterpret_tensor(buf440, (4, 32), (32, 1), 0), reinterpret_tensor(buf443, (4, 32), (32, 1), 0), buf444, buf448, buf449, buf450, reinterpret_tensor(buf451, (4, 32), (32, 1), 0), reinterpret_tensor(buf454, (4, 32), (32, 1), 0), buf456) def conv1x1(cin, cout, stride=1, bias=False): return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0, bias=bias) def conv3x3(cin, cout, stride=1, groups=1, bias=False): return StdConv2d(cin, cout, kernel_size=3, stride=stride, padding=1, bias=bias, groups=groups) def 
np2th(weights, conv=False): """Possibly convert HWIO to OIHW.""" if conv: weights = weights.transpose([3, 2, 0, 1]) return torch.from_numpy(weights) class StdConv2d(nn.Conv2d): def forward(self, x): w = self.weight v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False) w = (w - m) / torch.sqrt(v + 1e-05) return F.conv2d(x, w, self.bias, self.stride, self.padding, self. dilation, self.groups) class PreActBottleneck(nn.Module): """Pre-activation (v2) bottleneck block. """ def __init__(self, cin, cout=None, cmid=None, stride=1): super().__init__() cout = cout or cin cmid = cmid or cout // 4 self.gn1 = nn.GroupNorm(32, cmid, eps=1e-06) self.conv1 = conv1x1(cin, cmid, bias=False) self.gn2 = nn.GroupNorm(32, cmid, eps=1e-06) self.conv2 = conv3x3(cmid, cmid, stride, bias=False) self.gn3 = nn.GroupNorm(32, cout, eps=1e-06) self.conv3 = conv1x1(cmid, cout, bias=False) self.relu = nn.ReLU(inplace=True) if stride != 1 or cin != cout: self.downsample = conv1x1(cin, cout, stride, bias=False) self.gn_proj = nn.GroupNorm(cout, cout) def forward(self, x): residual = x if hasattr(self, 'downsample'): residual = self.downsample(x) residual = self.gn_proj(residual) y = self.relu(self.gn1(self.conv1(x))) y = self.relu(self.gn2(self.conv2(y))) y = self.gn3(self.conv3(y)) y = self.relu(residual + y) return y def load_from(self, weights, n_block, n_unit): conv1_weight = np2th(weights[n_block + '/' + n_unit + '/' + 'conv1/kernel'], conv=True) conv2_weight = np2th(weights[n_block + '/' + n_unit + '/' + 'conv2/kernel'], conv=True) conv3_weight = np2th(weights[n_block + '/' + n_unit + '/' + 'conv3/kernel'], conv=True) gn1_weight = np2th(weights[n_block + '/' + n_unit + '/' + 'gn1/scale']) gn1_bias = np2th(weights[n_block + '/' + n_unit + '/' + 'gn1/bias']) gn2_weight = np2th(weights[n_block + '/' + n_unit + '/' + 'gn2/scale']) gn2_bias = np2th(weights[n_block + '/' + n_unit + '/' + 'gn2/bias']) gn3_weight = np2th(weights[n_block + '/' + n_unit + '/' + 'gn3/scale']) gn3_bias = np2th(weights[n_block + '/' + n_unit + '/' + 'gn3/bias']) self.conv1.weight.copy_(conv1_weight) self.conv2.weight.copy_(conv2_weight) self.conv3.weight.copy_(conv3_weight) self.gn1.weight.copy_(gn1_weight.view(-1)) self.gn1.bias.copy_(gn1_bias.view(-1)) self.gn2.weight.copy_(gn2_weight.view(-1)) self.gn2.bias.copy_(gn2_bias.view(-1)) self.gn3.weight.copy_(gn3_weight.view(-1)) self.gn3.bias.copy_(gn3_bias.view(-1)) if hasattr(self, 'downsample'): proj_conv_weight = np2th(weights[n_block + '/' + n_unit + '/' + 'conv_proj/kernel'], conv=True) proj_gn_weight = np2th(weights[n_block + '/' + n_unit + '/' + 'gn_proj/scale']) proj_gn_bias = np2th(weights[n_block + '/' + n_unit + '/' + 'gn_proj/bias']) self.downsample.weight.copy_(proj_conv_weight) self.gn_proj.weight.copy_(proj_gn_weight.view(-1)) self.gn_proj.bias.copy_(proj_gn_bias.view(-1)) class ResNetV2New(nn.Module): """Implementation of Pre-activation (v2) ResNet mode.""" def __init__(self, block_units, width_factor): super().__init__() width = int(64 * width_factor) self.width = width self.root = nn.Sequential(OrderedDict([('conv', StdConv2d(3, width, kernel_size=7, stride=2, bias=False, padding=3)), ('gn', nn. 
GroupNorm(32, width, eps=1e-06)), ('relu', nn.ReLU(inplace=True )), ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0))])) self.body = nn.Sequential(OrderedDict([('block1', nn.Sequential( OrderedDict([('unit1', PreActBottleneck(cin=width, cout=width * 4, cmid=width))] + [(f'unit{i:d}', PreActBottleneck(cin=width * 4, cout=width * 4, cmid=width)) for i in range(2, block_units[0 ] + 1)]))), ('block2', nn.Sequential(OrderedDict([('unit1', PreActBottleneck(cin=width * 4, cout=width * 8, cmid=width * 2, stride=2))] + [(f'unit{i:d}', PreActBottleneck(cin=width * 8, cout=width * 8, cmid=width * 2)) for i in range(2, block_units[ 1] + 1)]))), ('block3', nn.Sequential(OrderedDict([('unit1', PreActBottleneck(cin=width * 8, cout=width * 16, cmid=width * 4, stride=2))] + [(f'unit{i:d}', PreActBottleneck(cin=width * 16, cout=width * 16, cmid=width * 4)) for i in range(2, block_units [2] + 1)])))])) def forward(self, input_0): primals_1 = self.root.conv.weight primals_3 = self.root.gn.weight primals_4 = self.root.gn.bias primals_9 = self.body.block1.unit1.gn1.weight primals_10 = self.body.block1.unit1.gn1.bias primals_8 = self.body.block1.unit1.conv1.weight primals_12 = self.body.block1.unit1.gn2.weight primals_13 = self.body.block1.unit1.gn2.bias primals_11 = self.body.block1.unit1.conv2.weight primals_6 = self.body.block1.unit1.gn3.weight primals_7 = self.body.block1.unit1.gn3.bias primals_5 = self.body.block1.unit1.conv3.weight primals_14 = self.body.block1.unit1.downsample.weight primals_15 = self.body.block1.unit1.gn_proj.weight primals_16 = self.body.block1.unit1.gn_proj.bias primals_18 = self.body.block1.unit2.gn1.weight primals_19 = self.body.block1.unit2.gn1.bias primals_17 = self.body.block1.unit2.conv1.weight primals_21 = self.body.block1.unit2.gn2.weight primals_22 = self.body.block1.unit2.gn2.bias primals_20 = self.body.block1.unit2.conv2.weight primals_24 = self.body.block1.unit2.gn3.weight primals_25 = self.body.block1.unit2.gn3.bias primals_23 = self.body.block1.unit2.conv3.weight primals_27 = self.body.block1.unit3.gn1.weight primals_28 = self.body.block1.unit3.gn1.bias primals_26 = self.body.block1.unit3.conv1.weight primals_30 = self.body.block1.unit3.gn2.weight primals_31 = self.body.block1.unit3.gn2.bias primals_29 = self.body.block1.unit3.conv2.weight primals_33 = self.body.block1.unit3.gn3.weight primals_34 = self.body.block1.unit3.gn3.bias primals_32 = self.body.block1.unit3.conv3.weight primals_36 = self.body.block1.unit4.gn1.weight primals_37 = self.body.block1.unit4.gn1.bias primals_35 = self.body.block1.unit4.conv1.weight primals_39 = self.body.block1.unit4.gn2.weight primals_40 = self.body.block1.unit4.gn2.bias primals_38 = self.body.block1.unit4.conv2.weight primals_42 = self.body.block1.unit4.gn3.weight primals_43 = self.body.block1.unit4.gn3.bias primals_41 = self.body.block1.unit4.conv3.weight primals_48 = self.body.block2.unit1.gn1.weight primals_49 = self.body.block2.unit1.gn1.bias primals_47 = self.body.block2.unit1.conv1.weight primals_51 = self.body.block2.unit1.gn2.weight primals_52 = self.body.block2.unit1.gn2.bias primals_50 = self.body.block2.unit1.conv2.weight primals_45 = self.body.block2.unit1.gn3.weight primals_46 = self.body.block2.unit1.gn3.bias primals_53 = self.body.block2.unit1.conv3.weight primals_44 = self.body.block2.unit1.downsample.weight primals_54 = self.body.block2.unit1.gn_proj.weight primals_55 = self.body.block2.unit1.gn_proj.bias primals_57 = self.body.block2.unit2.gn1.weight primals_58 = self.body.block2.unit2.gn1.bias primals_56 = 
self.body.block2.unit2.conv1.weight primals_60 = self.body.block2.unit2.gn2.weight primals_61 = self.body.block2.unit2.gn2.bias primals_59 = self.body.block2.unit2.conv2.weight primals_63 = self.body.block2.unit2.gn3.weight primals_64 = self.body.block2.unit2.gn3.bias primals_62 = self.body.block2.unit2.conv3.weight primals_66 = self.body.block2.unit3.gn1.weight primals_67 = self.body.block2.unit3.gn1.bias primals_65 = self.body.block2.unit3.conv1.weight primals_69 = self.body.block2.unit3.gn2.weight primals_70 = self.body.block2.unit3.gn2.bias primals_68 = self.body.block2.unit3.conv2.weight primals_72 = self.body.block2.unit3.gn3.weight primals_73 = self.body.block2.unit3.gn3.bias primals_71 = self.body.block2.unit3.conv3.weight primals_75 = self.body.block2.unit4.gn1.weight primals_76 = self.body.block2.unit4.gn1.bias primals_74 = self.body.block2.unit4.conv1.weight primals_78 = self.body.block2.unit4.gn2.weight primals_79 = self.body.block2.unit4.gn2.bias primals_77 = self.body.block2.unit4.conv2.weight primals_81 = self.body.block2.unit4.gn3.weight primals_82 = self.body.block2.unit4.gn3.bias primals_80 = self.body.block2.unit4.conv3.weight primals_87 = self.body.block3.unit1.gn1.weight primals_88 = self.body.block3.unit1.gn1.bias primals_86 = self.body.block3.unit1.conv1.weight primals_90 = self.body.block3.unit1.gn2.weight primals_91 = self.body.block3.unit1.gn2.bias primals_89 = self.body.block3.unit1.conv2.weight primals_84 = self.body.block3.unit1.gn3.weight primals_85 = self.body.block3.unit1.gn3.bias primals_92 = self.body.block3.unit1.conv3.weight primals_83 = self.body.block3.unit1.downsample.weight primals_93 = self.body.block3.unit1.gn_proj.weight primals_94 = self.body.block3.unit1.gn_proj.bias primals_96 = self.body.block3.unit2.gn1.weight primals_97 = self.body.block3.unit2.gn1.bias primals_95 = self.body.block3.unit2.conv1.weight primals_99 = self.body.block3.unit2.gn2.weight primals_100 = self.body.block3.unit2.gn2.bias primals_98 = self.body.block3.unit2.conv2.weight primals_102 = self.body.block3.unit2.gn3.weight primals_103 = self.body.block3.unit2.gn3.bias primals_101 = self.body.block3.unit2.conv3.weight primals_105 = self.body.block3.unit3.gn1.weight primals_106 = self.body.block3.unit3.gn1.bias primals_104 = self.body.block3.unit3.conv1.weight primals_108 = self.body.block3.unit3.gn2.weight primals_109 = self.body.block3.unit3.gn2.bias primals_107 = self.body.block3.unit3.conv2.weight primals_111 = self.body.block3.unit3.gn3.weight primals_112 = self.body.block3.unit3.gn3.bias primals_110 = self.body.block3.unit3.conv3.weight primals_114 = self.body.block3.unit4.gn1.weight primals_115 = self.body.block3.unit4.gn1.bias primals_113 = self.body.block3.unit4.conv1.weight primals_117 = self.body.block3.unit4.gn2.weight primals_118 = self.body.block3.unit4.gn2.bias primals_116 = self.body.block3.unit4.conv2.weight primals_120 = self.body.block3.unit4.gn3.weight primals_121 = self.body.block3.unit4.gn3.bias primals_119 = self.body.block3.unit4.conv3.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, 
primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121]) return output[0]
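The StdConv2d layer defined above is what the triton_per_fused_add_div_sqrt_sub_var_mean_* kernels in the compiled call implement: each weight tensor is standardized to zero mean and unit variance per output filter before the convolution runs, which is why every extern_kernels.convolution above consumes a freshly computed buffer rather than a raw primals_* weight. A minimal eager-mode sketch of that transformation; the standardize helper name and the shapes are illustrative, not from the repo:

import torch
import torch.nn.functional as F

def standardize(w: torch.Tensor, eps: float = 1e-05) -> torch.Tensor:
    # Per-filter (biased) mean/variance over in_channels and the kernel window,
    # the same var_mean + sub + div + sqrt sequence fused in the kernels above.
    v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
    return (w - m) / torch.sqrt(v + eps)

w = torch.randn(1024, 256, 1, 1)  # e.g. the 1x1 expansion conv of a bottleneck
x = torch.randn(4, 256, 15, 15)
w_std = standardize(w)
# Each output filter of the standardized weight now has ~zero mean.
assert torch.allclose(w_std.mean(dim=[1, 2, 3]), torch.zeros(1024), atol=1e-5)
y = F.conv2d(x, w_std)            # what extern_kernels.convolution then computes
print(y.shape)                    # torch.Size([4, 1024, 15, 15])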
repo_name: MetaMain/ViTRobust
module_name: ResNetV2
synthetic: false
uuid: 17999
licenses: ["BSD-3-Clause"]
stars: 6
sha: 5bca523f430933469d9f82022e334839388cee7a
repo_link: https://github.com/MetaMain/ViTRobust/tree/5bca523f430933469d9f82022e334839388cee7a

entry_point: ConcatConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/22/c22tiykxntrcnq323gf7vohop2o6wgffirtucuhayecdqlfwwucj.py # Topologically Sorted Source Nodes: [ttx], Original ATen: [aten.cat] # Source node to ATen node mapping: # ttx => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %primals_1], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 5 x0 = xindex % 16 x2 = (xindex // 80) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x2)), 
tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 5, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (16*((-1) + x1)) + (64*x2)), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/bo/cbobw73zf5udllzw6ypzn2lpl3t5xgyic35frylcttxdzai224c5.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_3, (4, 5, 3, 3), (45, 9, 3, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [ttx], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_2, primals_1, buf0, 320, grid=grid(320), stream=stream0) del primals_1 del primals_2 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), 
dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_4, 64, grid=grid(64), stream=stream0) del primals_4 return (buf2, primals_3, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 5, 3, 3), (45, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
import torch.nn as nn


class ConcatConv2d(nn.Module):

    def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0,
            dilation=1, groups=1, bias=True, transpose=False):
        super(ConcatConv2d, self).__init__()
        module = nn.ConvTranspose2d if transpose else nn.Conv2d
        self._layer = module(dim_in + 1, dim_out, kernel_size=ksize,
            stride=stride, padding=padding, dilation=dilation,
            groups=groups, bias=bias)

    def forward(self, t, x):
        tt = torch.ones_like(x[:, :1, :, :]) * t
        ttx = torch.cat([tt, x], 1)
        return self._layer(ttx)


def get_inputs():
    return [torch.rand([4, 1, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dim_in': 4, 'dim_out': 4}]
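A minimal usage sketch for the eager ConcatConv2d above (assuming the class is in scope; shapes follow get_inputs()): the layer broadcasts the time tensor t across one extra channel before the convolution, which is why the underlying conv is built with dim_in + 1 input channels.

import torch

layer = ConcatConv2d(dim_in=4, dim_out=4)  # conv sees 4 + 1 = 5 input channels
t = torch.rand(4, 1, 4, 4)                 # per-sample time plane
x = torch.rand(4, 4, 4, 4)                 # feature map
out = layer(t, x)                          # cat([ones_like(x[:, :1]) * t, x], 1), then conv
print(out.shape)                           # torch.Size([4, 4, 2, 2]) with ksize=3, padding=0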
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 5 x0 = xindex % 16 x2 = xindex // 80 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 5, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-1 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_3, (4, 5, 3, 3), (45, 9, 3, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(320)](primals_2, primals_1, buf0, 320, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(64)](buf2, primals_4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_4 return buf2, primals_3, buf0 class ConcatConv2dNew(nn.Module): def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False): super(ConcatConv2dNew, self).__init__() module = nn.ConvTranspose2d if transpose else nn.Conv2d self._layer = module(dim_in + 1, dim_out, kernel_size=ksize, stride =stride, padding=padding, dilation=dilation, groups=groups, bias=bias) def forward(self, input_0, input_1): primals_3 = self._layer.weight primals_4 = self._layer.bias primals_2 = input_0 primals_1 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
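A hedged equivalence sketch between the eager module and the compiled wrapper above, assuming a CUDA device is available (call() allocates on cuda:0) and that both classes are importable together:

import torch

eager = ConcatConv2d(dim_in=4, dim_out=4).cuda()
fused = ConcatConv2dNew(dim_in=4, dim_out=4).cuda()
fused._layer.load_state_dict(eager._layer.state_dict())  # share conv weights
t = torch.rand(4, 1, 4, 4, device='cuda')
x = torch.rand(4, 4, 4, 4, device='cuda')
# ConcatConv2dNew takes (t, x) in the same order as the eager forward.
print(torch.allclose(eager(t, x), fused(t, x), atol=1e-6))  # expected True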
TevenLeScao/BasicSR
ConcatConv2d
false
18,000
[ "Apache-2.0" ]
4
1a7bd8754de00f3a9c9f2031acfc447350459ea0
https://github.com/TevenLeScao/BasicSR/tree/1a7bd8754de00f3a9c9f2031acfc447350459ea0
INN_loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/xt/cxtmskuaxio3iworwhgrvr3jnw3rejx7ssw4farcwvemzulz2c3r.py # Topologically Sorted Source Nodes: [pow_1, sum_1, mul, losses, mean, loss], Original ATen: [aten.pow, aten.sum, aten.mul, aten.sub, aten.mean, aten.div] # Source node to ATen node mapping: # loss => div # losses => sub # mean => mean # mul => mul # pow_1 => pow_1 # sum_1 => sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 0.5), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %arg1_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mean, 4), kwargs = {}) triton_per_fused_div_mean_mul_pow_sub_sum_0 = async_compile.triton('triton_per_fused_div_mean_mul_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mean_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 
'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_mean_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) % 4 r3 = rindex tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (r3), None) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = 0.5 tmp12 = tmp10 * tmp11 tmp14 = tmp12 - tmp13 tmp15 = tl.broadcast_to(tmp14, [RBLOCK]) tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0)) tmp18 = 256.0 tmp19 = tmp17 / tmp18 tmp20 = 0.25 tmp21 = tmp19 * tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [pow_1, sum_1, mul, losses, mean, loss], Original ATen: [aten.pow, aten.sum, aten.mul, aten.sub, aten.mean, aten.div] stream0 = get_raw_stream(0) triton_per_fused_div_mean_mul_pow_sub_sum_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class INN_loss(nn.Module): def __init__(self, num_dim): super(INN_loss, self).__init__() self.num_dim = num_dim def forward(self, Z, log_jac_det): losses = 0.5 * torch.sum(Z ** 2, 1) - log_jac_det loss = losses.mean() / self.num_dim return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_dim': 4}]
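The loss above is the standard change-of-variables negative log-likelihood of a normalizing flow, up to an additive constant: -log p(x) = 0.5 * ||z||^2 - log|det J| + const, averaged over the batch and divided by num_dim. A minimal numeric sketch with illustrative 2-D inputs (the compiled kernel below instead hardcodes the 4x4x4x4 shapes from get_inputs()):

import torch

crit = INN_loss(num_dim=3)
Z = torch.zeros(2, 3)           # latent codes at the Gaussian mode
log_jac_det = torch.zeros(2)    # volume-preserving transform
print(crit(Z, log_jac_det))     # tensor(0.): 0.5 * ||0||^2 - 0, meaned, / num_dim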
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_mean_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 % 4 r3 = rindex tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr1 + r3, None) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = 0.5 tmp12 = tmp10 * tmp11 tmp14 = tmp12 - tmp13 tmp15 = tl.broadcast_to(tmp14, [RBLOCK]) tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0)) tmp18 = 256.0 tmp19 = tmp17 / tmp18 tmp20 = 0.25 tmp21 = tmp19 * tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_div_mean_mul_pow_sub_sum_0[grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class INN_lossNew(nn.Module): def __init__(self, num_dim): super(INN_lossNew, self).__init__() self.num_dim = num_dim def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
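A hedged CUDA check that the fused kernel matches the eager loss. Note that the constants 256.0 and 0.25 in the kernel bake in the 4x4x4x4 input shape and num_dim=4, so the compiled wrapper is only valid for exactly those inputs:

import torch

Z = torch.rand(4, 4, 4, 4, device='cuda')
ljd = torch.rand(4, 4, 4, 4, device='cuda')
ref = INN_loss(num_dim=4)(Z, ljd)      # eager broadcast of (4,4,4) losses over (4,4,4,4) ljd
fused = INN_lossNew(num_dim=4)(Z, ljd)
print(torch.allclose(ref, fused, atol=1e-6))  # expected True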
ThorstenBuss/jet-inn
INN_loss
false
18,001
[ "Apache-2.0" ]
4
3777aac712fc99aa2c48031db0c09eaebee70f37
https://github.com/ThorstenBuss/jet-inn/tree/3777aac712fc99aa2c48031db0c09eaebee70f37
Upsample
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/kx/ckxde73ml6ei3yhvknzawd63eqz4kf3ncblxyquc2ibjniys2tiq.py # Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._to_copy, aten.arange, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index] # Source node to ATen node mapping: # interpolate => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_4, add_5, add_6, clamp_max_2, clamp_max_3, clamp_min_1, clamp_min_2, clamp_min_3, convert_element_type_1, convert_element_type_2, convert_element_type_3, iota_1, mul_1, mul_2, mul_3, mul_4, sub_1, sub_2, sub_3, sub_4, sub_5, sub_6 # Graph fragment: # %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {}) # %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False}) # %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_1, torch.float32), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_2, 0.5), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 1.0), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 0.5), kwargs = {}) # %clamp_min_1 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_1, 0.0), kwargs = {}) # %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min_1, torch.int64), kwargs = {}) # %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %clamp_max_1]), kwargs = {}) # %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {}) # %sub_2 : [num_users=1] = 
call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_1, %convert_element_type_3), kwargs = {}) # %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {}) # %clamp_max_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {}) # %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {}) # %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {}) # %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_4), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {}) # %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_5, 0.0), kwargs = {}) # %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_4), kwargs = {}) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 4 x0 = xindex % 4 x2 = (xindex // 16) x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 3, tl.int64) tmp13 = triton_helpers.minimum(tmp11, tmp12) tmp14 = x0 tmp15 = tmp14.to(tl.float32) tmp16 = tmp15 + tmp2 tmp17 = tmp16 * tmp4 tmp18 = tmp17 - tmp2 tmp19 = triton_helpers.maximum(tmp18, tmp7) tmp20 = tmp19.to(tl.int32) tmp21 = tmp20 + tmp10 tmp22 = triton_helpers.minimum(tmp21, tmp12) tmp23 = tl.load(in_ptr0 + (tmp22 + (4*tmp13) + (16*x2)), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr0 + (tmp20 + (4*tmp13) + (16*x2)), xmask, eviction_policy='evict_last') tmp25 = tmp23 - tmp24 tmp26 = tmp20.to(tl.float32) tmp27 = tmp19 - tmp26 tmp28 = triton_helpers.maximum(tmp27, tmp7) tmp29 = triton_helpers.minimum(tmp28, tmp4) tmp30 = tmp25 * tmp29 tmp31 = tmp24 + tmp30 tmp32 = tl.load(in_ptr0 + (tmp20 + (4*tmp9) + (16*x2)), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr0 + (tmp22 + (4*tmp9) + (16*x2)), xmask, eviction_policy='evict_last') tmp34 = tmp33 - tmp32 tmp35 = tmp34 * tmp29 tmp36 = tmp32 + tmp35 tmp37 = tmp31 - tmp36 tmp38 = tmp9.to(tl.float32) tmp39 = tmp8 - tmp38 tmp40 = triton_helpers.maximum(tmp39, tmp7) tmp41 = triton_helpers.minimum(tmp40, tmp4) tmp42 = tmp37 * tmp41 tmp43 = tmp36 + tmp42 tl.store(in_out_ptr0 + (x4), tmp43, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = buf0; del buf0 # reuse buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [interpolate], Original ATen: [aten._to_copy, aten.arange, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index] stream0 = get_raw_stream(0) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(buf2, arg0_1, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class Upsample(nn.Module):

    def __init__(self, scale_factor, mode='bilinear'):
        super().__init__()
        self.scale_factor = scale_factor
        self.mode = mode

    def forward(self, input):
        return nn.functional.interpolate(input,
            scale_factor=self.scale_factor, mode=self.mode,
            align_corners=False)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'scale_factor': 1.0}]
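With scale_factor=1.0 and align_corners=False, the source grid coincides with the target grid ((x + 0.5) * 1.0 - 0.5 = x), so the bilinear resampling above reduces to an identity copy; the fused kernel computes exactly this interpolation. A minimal sanity-check sketch, assuming the Upsample class above is in scope:

import torch

up = Upsample(scale_factor=1.0)
x = torch.rand(4, 4, 4, 4)
print(torch.allclose(up(x), x))  # True: the interpolation weights collapse to the floor index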
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0( in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x2 = xindex // 16 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 3, tl.int64) tmp13 = triton_helpers.minimum(tmp11, tmp12) tmp14 = x0 tmp15 = tmp14.to(tl.float32) tmp16 = tmp15 + tmp2 tmp17 = tmp16 * tmp4 tmp18 = tmp17 - tmp2 tmp19 = triton_helpers.maximum(tmp18, tmp7) tmp20 = tmp19.to(tl.int32) tmp21 = tmp20 + tmp10 tmp22 = triton_helpers.minimum(tmp21, tmp12) tmp23 = tl.load(in_ptr0 + (tmp22 + 4 * tmp13 + 16 * x2), xmask, eviction_policy='evict_last') tmp24 = tl.load(in_ptr0 + (tmp20 + 4 * tmp13 + 16 * x2), xmask, eviction_policy='evict_last') tmp25 = tmp23 - tmp24 tmp26 = tmp20.to(tl.float32) tmp27 = tmp19 - tmp26 tmp28 = triton_helpers.maximum(tmp27, tmp7) tmp29 = triton_helpers.minimum(tmp28, tmp4) tmp30 = tmp25 * tmp29 tmp31 = tmp24 + tmp30 tmp32 = tl.load(in_ptr0 + (tmp20 + 4 * tmp9 + 16 * x2), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr0 + (tmp22 + 4 * tmp9 + 16 * x2), xmask, eviction_policy='evict_last') tmp34 = tmp33 - tmp32 tmp35 = tmp34 * tmp29 tmp36 = tmp32 + tmp35 tmp37 = tmp31 - tmp36 tmp38 = tmp9.to(tl.float32) tmp39 = tmp8 - tmp38 tmp40 = triton_helpers.maximum(tmp39, tmp7) tmp41 = triton_helpers.minimum(tmp40, tmp4) tmp42 = tmp37 * tmp41 tmp43 = tmp36 + tmp42 tl.store(in_out_ptr0 + x4, tmp43, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = buf0 del buf0 buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid (256)](buf2, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf2, class UpsampleNew(nn.Module): def __init__(self, scale_factor, mode='bilinear'): super().__init__() self.scale_factor = scale_factor self.mode = mode def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Tomaz-Vieira/tiktorch
Upsample
false
18,002
[ "MIT" ]
8
2d6803c4ba5e26e4b27bf8af6638040fa4fc5628
https://github.com/Tomaz-Vieira/tiktorch/tree/2d6803c4ba5e26e4b27bf8af6638040fa4fc5628
SelfAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/sv/csv7sm5tuutym62lkvl6lihs754rod6olbwllzklu2nhsxucndhy.py # Topologically Sorted Source Nodes: [queries_2], Original ATen: [aten.div] # Source node to ATen node mapping: # queries_2 => div # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_10, 1.4142135623730951), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 16 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*(x1 % 4)) + (16*x2) + (64*(x1 // 4))), xmask) tmp1 = 0.7071067811865475 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x3), tmp2, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/rs/crseh6o63khp5g5iomuhdtddeiirk3chskjrlj2kamfz67wp2kpa.py # Topologically Sorted Source Nodes: [keys_2], Original ATen: [aten.div] # Source node to ATen node mapping: # keys_2 => div_1 # Graph fragment: # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_9, 1.4142135623730951), kwargs = {}) triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*(x2 % 4)) + (16*x1) + (64*(x2 // 4))), xmask) tmp1 = 0.7071067811865475 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/kz/ckzqylporms4fvgcrqg44ypprwpanp6hf222rji24wskr3b44aga.py # Topologically Sorted Source Nodes: [dot_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # dot_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [2], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/4d/c4dndrlfjcamjfnn3ng5agjc3ahefdgw6jcsnn6hm4ljwpbfbe7h.py # Topologically Sorted Source Nodes: [dot_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # dot_1 => div_2, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/dj/cdjffwvn2idikuml44jvdav3vzvow2i4vqtqbymwksc7y55376j6.py # Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_2 => clone_2 # Graph fragment: # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_5,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 4 x3 = (xindex // 64) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask) tl.store(out_ptr0 + (x4), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ei/ceiwk7wod7mvxburubtplomqfzl6gzcuhptwcvf6cea3r3ax6gcm.py # Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose] # Source node to ATen node mapping: # Graph fragment: # %permute_11 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%div, [0, 2, 1]), kwargs = {}) triton_poi_fused_transpose_5 = async_compile.triton('triton_poi_fused_transpose_5', ''' import triton import 
triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_transpose_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_transpose_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (64*x1)), xmask) tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (16, 4), (4, 1)) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((16, 4, 4), (4, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [queries_2], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(buf1, buf3, 256, grid=grid(256), stream=stream0) buf4 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [keys_2], Original ATen: [aten.div] triton_poi_fused_div_1.run(buf0, buf4, 256, grid=grid(256), stream=stream0) buf5 = reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source 
Nodes: [queries_2, dot], Original ATen: [aten.div, aten.bmm] extern_kernels.bmm(buf3, reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0), out=buf5) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [dot_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0) buf7 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [dot_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf6, buf7, 256, grid=grid(256), stream=stream0) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf2, buf8, 256, grid=grid(256), stream=stream0) buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [bmm_1], Original ATen: [aten.bmm] extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf9, buf10, 256, grid=grid(256), stream=stream0) buf11 = reinterpret_tensor(buf9, (16, 4, 4), (16, 1, 4), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [aten.transpose] triton_poi_fused_transpose_5.run(buf3, buf11, 256, grid=grid(256), stream=stream0) del buf3 return (reinterpret_tensor(buf10, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0), buf11, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.nn as nn


class SelfAttention(nn.Module):

    def __init__(self, input_size, heads, embed_size):
        super().__init__()
        self.input_size = input_size
        self.heads = heads
        self.emb_size = embed_size
        self.tokeys = nn.Linear(self.input_size, self.emb_size * heads,
            bias=False)
        self.toqueries = nn.Linear(self.input_size, self.emb_size * heads,
            bias=False)
        self.tovalues = nn.Linear(self.input_size, self.emb_size * heads,
            bias=False)

    def forward(self, x):
        b, t, hin = x.size()
        assert hin == self.input_size, f'Input size {hin} should match {self.input_size}'
        h = self.heads
        e = self.emb_size
        keys = self.tokeys(x).view(b, t, h, e)
        queries = self.toqueries(x).view(b, t, h, e)
        values = self.tovalues(x).view(b, t, h, e)
        keys = keys.transpose(1, 2).contiguous().view(b * h, t, e)
        queries = queries.transpose(1, 2).contiguous().view(b * h, t, e)
        values = values.transpose(1, 2).contiguous().view(b * h, t, e)
        queries = queries / e ** (1 / 4)
        keys = keys / e ** (1 / 4)
        dot = torch.bmm(queries, keys.transpose(1, 2))
        assert dot.size() == (b * h, t, t)
        dot = F.softmax(dot, dim=2)
        out = torch.bmm(dot, values).view(b, h, t, e)
        out = out.transpose(1, 2).contiguous().view(b, t, h * e)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'heads': 4, 'embed_size': 4}]
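A minimal usage sketch for SelfAttention, with sizes from get_init_inputs(); dividing both queries and keys by e ** (1/4) yields the usual combined 1/sqrt(e) scaling inside the dot product, which is the 0.7071... constant in the fused div kernels above:

import torch

attn = SelfAttention(input_size=4, heads=4, embed_size=4)
x = torch.rand(4, 4, 4)   # (batch, tokens, input_size)
out = attn(x)
print(out.shape)          # torch.Size([4, 4, 16]): heads * embed_size concatenated per token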
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 16 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 % 4) + 16 * x2 + 64 * (x1 // 4)), xmask) tmp1 = 0.7071067811865475 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x2 % 4) + 16 * x1 + 64 * (x2 // 4)), xmask) tmp1 = 0.7071067811865475 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tl.store(out_ptr0 + x4, tmp0, xmask) @triton.jit def triton_poi_fused_transpose_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 64 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (16, 4), (4, 1)) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (16, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), out=buf2) del primals_4 buf3 = empty_strided_cuda((16, 4, 4), (4, 64, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(256)](buf1, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused_div_1[grid(256)](buf0, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0) del buf0 extern_kernels.bmm(buf3, reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0), out=buf5) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = buf5 del buf5 triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf6 triton_poi_fused_clone_4[grid(256)](buf2, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) buf9 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0) del buf2 extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_4[grid(256)](buf9, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4, 4), (16, 1, 4), 0) del buf9 triton_poi_fused_transpose_5[grid(256)](buf3, buf11, 256, XBLOCK= 256, num_warps=4, num_stages=1) del buf3 return reinterpret_tensor(buf10, (4, 4, 16), (64, 16, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf8, (16, 4, 4), (16, 1, 4), 0 ), buf11, buf4 class SelfAttentionNew(nn.Module): def __init__(self, input_size, heads, embed_size): super().__init__() self.input_size = input_size self.heads = heads self.emb_size = embed_size self.tokeys = nn.Linear(self.input_size, self.emb_size * heads, bias=False) self.toqueries = nn.Linear(self.input_size, self.emb_size * heads, bias=False) self.tovalues = nn.Linear(self.input_size, self.emb_size * heads, bias=False) def forward(self, input_0): primals_2 = self.tokeys.weight primals_3 = self.toqueries.weight primals_4 = self.tovalues.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
Sud0x67/mrmix
SelfAttention
false
18,003
[ "Apache-2.0" ]
4
4f4784e421c768509bd007e21b4455b56edc7cd2
https://github.com/Sud0x67/mrmix/tree/4f4784e421c768509bd007e21b4455b56edc7cd2
Conv2dTime
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/22/c22tiykxntrcnq323gf7vohop2o6wgffirtucuhayecdqlfwwucj.py # Topologically Sorted Source Nodes: [t_and_x], Original ATen: [aten.cat] # Source node to ATen node mapping: # t_and_x => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %primals_1], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 5 x0 = xindex % 16 x2 = (xindex // 80) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 
(16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 5, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (16*((-1) + x1)) + (64*x2)), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/vb/cvbno3dccglzmlbisnwicoai3aocrgweun3buh6avsdqdjjhjczh.py # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_3, (4, 5, 4, 4), (80, 16, 4, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [t_and_x], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_2, primals_1, buf0, 320, grid=grid(320), stream=stream0) del primals_1 del primals_2 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), 
padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_4, 16, grid=grid(16), stream=stream0) del primals_4 return (buf2, primals_3, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 5, 4, 4), (80, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
import torch.nn as nn


class Conv2dTime(nn.Conv2d):
    """
    Implements a time-dependent 2D convolution by appending the time
    variable as an extra input channel.
    """

    def __init__(self, in_channels, *args, **kwargs):
        super(Conv2dTime, self).__init__(in_channels + 1, *args, **kwargs)

    def forward(self, t, x):
        # Broadcast the scalar time t into a (N, 1, H, W) channel map.
        t_img = torch.ones_like(x[:, :1, :, :]) * t
        t_and_x = torch.cat([t_img, x], 1)
        return super(Conv2dTime, self).forward(t_and_x)


def get_inputs():
    return [torch.rand([4, 1, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 5 x0 = xindex % 16 x2 = xindex // 80 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 5, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-1 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_3, (4, 5, 4, 4), (80, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(320)](primals_2, primals_1, buf0, 320, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(16)](buf2, primals_4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 return buf2, primals_3, buf0 class Conv2dTimeNew(nn.Conv2d): """ Implements time dependent 2d convolutions, by appending the time variable as an extra channel. """ def __init__(self, in_channels, *args, **kwargs): super(Conv2dTimeNew, self).__init__(in_channels + 1, *args, **kwargs) def forward(self, input_0, input_1): primals_3 = self.weight primals_4 = self.bias primals_2 = input_0 primals_1 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
TevenLeScao/BasicSR
Conv2dTime
false
18,004
[ "Apache-2.0" ]
4
1a7bd8754de00f3a9c9f2031acfc447350459ea0
https://github.com/TevenLeScao/BasicSR/tree/1a7bd8754de00f3a9c9f2031acfc447350459ea0
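The record above pairs the eager Conv2dTime module with an Inductor lowering in which the time-channel concatenation becomes the fused triton_poi_fused_cat_0 kernel and the convolution stays an extern kernel. As a sanity check, here is a minimal eager-mode sketch of the same computation; it assumes nothing beyond the Conv2dTime class defined in this record and runs on CPU:

import torch
import torch.nn.functional as F

conv = Conv2dTime(in_channels=4, out_channels=4, kernel_size=4)
x = torch.rand(4, 4, 4, 4)
t = torch.tensor(0.5)

out = conv(t, x)  # scalar t is broadcast into an extra (N, 1, H, W) channel

# The same computation spelled out: concatenate, then a plain conv2d --
# exactly the cat kernel plus extern convolution in the lowering above.
t_img = torch.ones_like(x[:, :1, :, :]) * t
ref = F.conv2d(torch.cat([t_img, x], dim=1), conv.weight, conv.bias)
assert torch.allclose(out, ref, atol=1e-6)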
ResBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/gq/cgqaaujbd3negrd7hmcrhek3kzy5sohtyhy6r6uqf7bf5zhv6f7y.py # Topologically Sorted Source Nodes: [group_norm, out], Original ATen: [aten.native_group_norm, aten.relu] # Source node to ATen node mapping: # group_norm => add, add_1, mul_1, rsqrt, var_mean # out => relu # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {}) triton_per_fused_native_group_norm_relu_0 = async_compile.triton('triton_per_fused_native_group_norm_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 
'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_relu_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tl.full([1, 1], 0, tl.int32) tmp29 = triton_helpers.maximum(tmp28, tmp27) tl.store(out_ptr2 + (r1 + (16*x0)), tmp29, xmask) tl.store(out_ptr3 + (x0), tmp22, xmask) tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/2f/c2fjqh4rkxd5m4ofss2fb5di4n3m7oktsce2obpg6r6id2jwywin.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add_4 # Graph fragment: # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_1), kwargs = {}) triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [group_norm, out], Original ATen: [aten.native_group_norm, aten.relu] stream0 = get_raw_stream(0) triton_per_fused_native_group_norm_relu_0.run(primals_1, primals_2, primals_3, buf0, buf3, buf12, 16, 16, grid=grid(16), stream=stream0) del primals_2 del primals_3 # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.native_group_norm, aten.relu] triton_per_fused_native_group_norm_relu_0.run(buf4, primals_5, primals_6, buf5, buf9, buf8, 16, 16, grid=grid(16), stream=stream0) del primals_6 # Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.convolution] buf10 = extern_kernels.convolution(buf9, primals_7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1)) buf11 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] triton_poi_fused_add_1.run(buf11, primals_1, 256, grid=grid(256), stream=stream0) return (buf11, primals_1, primals_4, primals_5, primals_7, buf3, buf4, reinterpret_tensor(buf5, (4, 4), (4, 1), 0), reinterpret_tensor(buf8, (4, 4), (4, 1), 0), buf9, reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0), reinterpret_tensor(buf12, (4, 4, 1), (4, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) 
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.utils.data
import torch.nn as nn


def norm(dim):
    return nn.GroupNorm(min(32, dim), dim)


def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
        padding=1, bias=False)


class ResBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(ResBlock, self).__init__()
        self.norm1 = norm(inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.norm2 = norm(planes)
        self.conv2 = conv3x3(planes, planes)

    def forward(self, x):
        shortcut = x
        out = self.relu(self.norm1(x))
        if self.downsample is not None:
            shortcut = self.downsample(out)
        out = self.conv1(out)
        out = self.norm2(out)
        out = self.relu(out)
        out = self.conv2(out)
        return out + shortcut


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'inplanes': 4, 'planes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_native_group_norm_relu_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp24 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 16, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp17 = tmp0 - tmp10 tmp18 = 16.0 tmp19 = tmp16 / tmp18 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = libdevice.rsqrt(tmp21) tmp23 = tmp17 * tmp22 tmp25 = tmp23 * tmp24 tmp27 = tmp25 + tmp26 tmp28 = tl.full([1, 1], 0, tl.int32) tmp29 = triton_helpers.maximum(tmp28, tmp27) tl.store(out_ptr2 + (r1 + 16 * x0), tmp29, xmask) tl.store(out_ptr3 + x0, tmp22, xmask) tl.store(out_ptr0 + x0, tmp10, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_per_fused_native_group_norm_relu_0[grid(16)](primals_1, primals_2, primals_3, buf0, buf3, buf12, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_2 del primals_3 buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = 
empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf8 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) triton_per_fused_native_group_norm_relu_0[grid(16)](buf4, primals_5, primals_6, buf5, buf9, buf8, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_6 buf10 = extern_kernels.convolution(buf9, primals_7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1)) buf11 = buf10 del buf10 triton_poi_fused_add_1[grid(256)](buf11, primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1) return (buf11, primals_1, primals_4, primals_5, primals_7, buf3, buf4, reinterpret_tensor(buf5, (4, 4), (4, 1), 0), reinterpret_tensor( buf8, (4, 4), (4, 1), 0), buf9, reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0), reinterpret_tensor(buf12, (4, 4, 1), (4, 1, 1), 0)) def norm(dim): return nn.GroupNorm(min(32, dim), dim) def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class ResBlockNew(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(ResBlockNew, self).__init__() self.norm1 = norm(inplanes) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.conv1 = conv3x3(inplanes, planes, stride) self.norm2 = norm(planes) self.conv2 = conv3x3(planes, planes) def forward(self, input_0): primals_2 = self.norm1.weight primals_3 = self.norm1.bias primals_4 = self.conv1.weight primals_5 = self.norm2.weight primals_6 = self.norm2.bias primals_7 = self.conv2.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
TevenLeScao/BasicSR
ResBlock
false
18,005
[ "Apache-2.0" ]
4
1a7bd8754de00f3a9c9f2031acfc447350459ea0
https://github.com/TevenLeScao/BasicSR/tree/1a7bd8754de00f3a9c9f2031acfc447350459ea0
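ResBlock is a pre-activation residual block (GroupNorm, then ReLU, then conv, twice, plus the identity shortcut), which is why the lowering reuses one fused group-norm/ReLU kernel for both norm layers and finishes with a fused add. A minimal sketch of that ordering, using only the ResBlock class from this record:

import torch

block = ResBlock(inplanes=4, planes=4)
x = torch.rand(4, 4, 4, 4)

with torch.no_grad():
    out = block(x)
    # Pre-activation form: norm -> relu -> conv, twice, then add the input.
    h = block.conv1(torch.relu(block.norm1(x)))
    h = block.conv2(torch.relu(block.norm2(h)))
    assert torch.allclose(out, h + x, atol=1e-6)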
LayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/sv/csvhsr4dttmoksxc2zhlkvea2rr53kddti5bcfg6ezfb2eenunvk.py # Topologically Sorted Source Nodes: [y], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # y => add, clone, rsqrt, var_mean # Graph fragment: # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x2), tmp8, xmask) tl.store(out_ptr1 + (x2), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/el/celr7444hbwo4wc3wtzb2gan6ly6qteaqkc4wnh7kjzw6rnnl35c.py # Topologically Sorted Source Nodes: [y], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # y => add, add_1, clone, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y3), ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (y3), ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2 + (4*y3)), tmp8, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [y], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_1, buf0, buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [y], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_1, buf0, buf1, primals_2, primals_3, buf2, 16, 4, grid=grid(16, 4), stream=stream0) del buf0 del buf1 del primals_2 del primals_3 return (reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class LayerNorm(nn.LayerNorm):

    def __init__(self, normalized_shape, eps=1e-05, elementwise_affine=True):
        """Layer norm applied over the channel dimension of (N, C, T) inputs."""
        super(LayerNorm, self).__init__(normalized_shape, eps=eps,
            elementwise_affine=elementwise_affine)

    def forward(self, x):
        x = x.permute(0, 2, 1)
        y = super(LayerNorm, self).forward(x)
        y = y.permute(0, 2, 1)
        return y


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'normalized_shape': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr1 + x2, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y3, ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2 + 4 * y3), tmp8, xmask & ymask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_1, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(16, 4)](primals_1, buf0, buf1, primals_2, primals_3, buf2, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del buf0 del buf1 del primals_2 del primals_3 return reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), primals_1 class LayerNormNew(nn.LayerNorm): def __init__(self, normalized_shape, eps=1e-05, elementwise_affine=True): """Layer Norm.""" super(LayerNormNew, self).__init__(normalized_shape, eps=eps, elementwise_affine=elementwise_affine) def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 
output = call([primals_1, primals_2, primals_3]) return output[0]
TraceOnBrainOff/pytorch-dc-tts
LayerNorm
false
18,006
[ "MIT" ]
4
993a0fbace561729b04df2179b41a0a7ea502e93
https://github.com/TraceOnBrainOff/pytorch-dc-tts/tree/993a0fbace561729b04df2179b41a0a7ea502e93
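This LayerNorm variant normalizes (N, C, T) activations over the channel dimension by permuting to channels-last, applying the standard layer norm, and permuting back; the two pointwise kernels above compute the per-position mean/rsqrt statistics and the affine transform, respectively. A minimal sketch of the equivalence, assuming only the LayerNorm class defined in this record:

import torch
import torch.nn.functional as F

ln = LayerNorm(normalized_shape=4)
x = torch.rand(4, 4, 4)  # (batch, channels, time)

y = ln(x)
# Equivalent: normalize each (batch, time) position across the 4 channels.
ref = F.layer_norm(x.permute(0, 2, 1), (4,), ln.weight, ln.bias,
                   eps=ln.eps).permute(0, 2, 1)
assert torch.allclose(y, ref, atol=1e-6)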
CrossEntropy
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/u2/cu2beycg2t2ghizs6f4qom7bxbxmajhdaakuyq6y2korxywhp6ba.py # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # loss => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = 
tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/3t/c3t7bn6xouvnq4rkk4fdxjozl3n6litfp2xwt7l2alsgsaoagk5g.py # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div] # Source node to ATen node mapping: # loss => div, exp, log, mul, neg, sub_1, sum_1, sum_2 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg1_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%neg, 64), kwargs = {}) triton_per_fused__log_softmax_div_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_div_mul_neg_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = 
tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = (rindex // 64) tmp0 = tl.load(in_ptr0 + (r3), None) tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (r3), None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tmp20 = 0.015625 tmp21 = tmp19 * tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div] triton_per_fused__log_softmax_div_mul_neg_sum_1.run(buf2, buf0, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg1_1 del buf0 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.optim


class CrossEntropy(nn.Module):

    def __init__(self, ignore_label=-1, weight=None, reduction='mean'):
        super(CrossEntropy, self).__init__()
        self.ignore_label = ignore_label
        self.criterion = nn.CrossEntropyLoss(weight=weight,
            ignore_index=ignore_label, reduction=reduction)

    def forward(self, score, target):
        ph, pw = score.size(2), score.size(3)
        h, w = target.size(1), target.size(2)
        if ph != h or pw != w:
            score = F.interpolate(score, size=(h, w), mode='bilinear',
                align_corners=False)
        loss = self.criterion(score, target)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + r3, None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tmp20 = 0.015625 tmp21 = tmp19 * tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused__log_softmax_div_mul_neg_sum_1[grid(1)](buf2, buf0, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg1_1 del buf0 return buf2, class CrossEntropyNew(nn.Module): def __init__(self, ignore_label=-1, weight=None, reduction='mean'): super(CrossEntropyNew, self).__init__() self.ignore_label = ignore_label self.criterion = nn.CrossEntropyLoss(weight=weight, ignore_index= ignore_label, reduction=reduction) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
TotalVariation/Flattenet
CrossEntropy
false
18,007
[ "MIT" ]
3
828d1f95f6f77dd0b681318f2a544e84cf4be834
https://github.com/TotalVariation/Flattenet/tree/828d1f95f6f77dd0b681318f2a544e84cf4be834
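With a float target of the same (N, C, H, W) shape as the score, nn.CrossEntropyLoss treats the target as per-class probabilities (soft labels), so the loss is -sum(target * log_softmax(score)) averaged over the N*H*W = 64 positions; that is where the 0.015625 = 1/64 constant in the fused reduction kernel above comes from. A minimal sketch (soft-label targets require PyTorch >= 1.10; only the CrossEntropy class from this record is assumed):

import torch
import torch.nn.functional as F

crit = CrossEntropy()
score = torch.rand(4, 4, 4, 4)
target = torch.rand(4, 4, 4, 4)  # float targets read as class probabilities

loss = crit(score, target)
# Soft-label cross entropy, averaged over the 4 * 4 * 4 = 64 positions.
ref = -(target * F.log_softmax(score, dim=1)).sum() / (4 * 4 * 4)
assert torch.allclose(loss, ref, atol=1e-6)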
DistillLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/pd/cpduezn7lp5glkz4fy6cgdmobtk4shg3vh5wc375wwk7udyvnxla.py # Topologically Sorted Source Nodes: [norm, t_feat], Original ATen: [aten.linalg_vector_norm, aten.div] # Source node to ATen node mapping: # norm => pow_1, pow_2, sum_1 # t_feat => div # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg2_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg2_1, %pow_2), kwargs = {}) triton_poi_fused_div_linalg_vector_norm_0 = async_compile.triton('triton_poi_fused_div_linalg_vector_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tl.store(out_ptr0 + (x3), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/w4/cw422id2etp3yn4dkoyljosek2hlzjt56672ekv6x6jopuwag7zv.py # Topologically Sorted Source Nodes: [diff, pow_1, sum_1, add, t_dist, diff_1, pow_2, sum_2, add_1, dist, sub_2], Original ATen: [aten.sub, aten.pow, aten.sum, aten.add, aten.sqrt] # Source node to ATen node mapping: # add => add # add_1 => add_1 # diff => sub # diff_1 => sub_1 # dist => sqrt_1 # pow_1 => pow_3 # pow_2 => pow_6 # sub_2 => sub_2 # sum_1 => sum_2 # sum_2 => sum_4 # t_dist => sqrt # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [2]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, 1e-12), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze_2, %unsqueeze_3), kwargs = {}) # %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_6, [2]), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_4, 1e-12), kwargs = {}) # %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sqrt, %sqrt_1), kwargs = {}) triton_poi_fused_add_pow_sqrt_sub_sum_1 = async_compile.triton('triton_poi_fused_add_pow_sqrt_sub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), 
equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_pow_sqrt_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_pow_sqrt_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = (xindex // 64) x1 = (xindex // 16) % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp37 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tmp19 = 1e-12 tmp20 = tmp18 + tmp19 tmp21 = libdevice.sqrt(tmp20) tmp24 = tmp22 - tmp23 tmp25 = tmp24 * tmp24 tmp28 = tmp26 - tmp27 tmp29 = tmp28 * tmp28 tmp30 = tmp25 + tmp29 tmp33 = tmp31 - tmp32 tmp34 = tmp33 * tmp33 tmp35 = tmp30 + tmp34 tmp38 = tmp36 - tmp37 tmp39 = tmp38 * tmp38 tmp40 = tmp35 + tmp39 tmp41 = tmp40 + tmp19 tmp42 = libdevice.sqrt(tmp41) tmp43 = tmp21 - tmp42 tl.store(out_ptr0 + (x3), tmp43, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/nj/cnji2exptvtskb2dyz6jaoosabjqmnkbumc2nswxhjn5um3j2psy.py # Topologically Sorted Source Nodes: [eq, same_cam_mask, setitem, setitem_1, setitem_2], Original ATen: [aten.eq, aten._to_copy, aten.lift_fresh, aten.fill] # Source node to ATen node mapping: # eq => eq # same_cam_mask => convert_element_type # setitem => copy, full_default # setitem_1 => copy_1, full_default_1 # setitem_2 => copy_2, full_default_2 # Graph fragment: # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%unsqueeze_4, %unsqueeze_5), kwargs = 
{}) # %convert_element_type : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq, torch.float32), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_1, %full_default), kwargs = {}) # %select_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int, %copy, 0, 0), kwargs = {}) # %select_scatter_default_1 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%convert_element_type, %select_scatter_default, 0, 0), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %copy_1 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_8, %full_default_1), kwargs = {}) # %select_scatter_default_2 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_1, %copy_1, 0, 1), kwargs = {}) # %select_scatter_default_3 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_1, %select_scatter_default_2, 0, 1), kwargs = {}) # %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %copy_2 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_15, %full_default_2), kwargs = {}) # %select_scatter_default_4 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_2, %copy_2, 0, 2), kwargs = {}) # %select_scatter_default_5 : [num_users=4] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_3, %select_scatter_default_4, 0, 2), kwargs = {}) triton_poi_fused__to_copy_eq_fill_lift_fresh_2 = async_compile.triton('triton_poi_fused__to_copy_eq_fill_lift_fresh_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_eq_fill_lift_fresh_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_eq_fill_lift_fresh_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 256) x1 = (xindex // 64) % 4 x0 = xindex % 64 x3 = xindex % 256 x5 = xindex tmp11 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp0 = x2 tmp1 = tl.full([1], 2, tl.int32) tmp2 = tmp0 == tmp1 tmp3 = x1 tmp4 = tmp3 == tmp1 tmp5 = tl.full([1], 1, tl.int32) tmp6 = tmp1 == tmp5 tmp7 = tmp3 == tmp5 tmp8 = tl.full([1], 0, tl.int32) tmp9 = tmp5 == tmp8 tmp10 = tmp3 == tmp8 tmp13 = tmp11 == tmp12 tmp14 = tmp13.to(tl.float32) tmp15 = 0.0 tmp16 = tl.where(tmp10, tmp15, tmp14) tmp18 = tmp17 == tmp12 tmp19 = tmp18.to(tl.float32) tmp20 = tl.where(tmp9, tmp16, tmp19) tmp21 = tl.where(tmp7, tmp15, tmp20) tmp22 = tmp1 == tmp8 tmp24 = tmp23 == tmp12 tmp25 = tmp24.to(tl.float32) tmp26 = tl.where(tmp22, tmp16, tmp25) tmp27 = tl.where(tmp6, tmp21, tmp26) tmp28 = tl.where(tmp4, tmp15, tmp27) tmp29 = tmp0 == tmp5 tmp30 = tmp0 == tmp8 tmp32 = tmp31 == tmp12 tmp33 = tmp32.to(tl.float32) tmp34 = tl.where(tmp30, tmp16, tmp33) tmp35 = tl.where(tmp29, tmp21, tmp34) tmp36 = tl.where(tmp2, tmp28, tmp35) tl.store(out_ptr0 + (x5), tmp36, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/3j/c3jb3ljur3emvijylzzrwg4vbe4n2bybulxo2advy6lvzmdrtxwv.py # Topologically Sorted Source Nodes: [setitem_3, diff_2, norm_2, mse_loss], Original ATen: [aten.lift_fresh, aten.fill, aten.mul, aten.linalg_vector_norm, aten.div] # Source node to ATen node mapping: # diff_2 => mul # mse_loss => div_2 # norm_2 => pow_7, pow_8, sum_5 # setitem_3 => copy_3, full_default_3 # Graph fragment: # %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %copy_3 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_22, %full_default_3), kwargs = {}) # %select_scatter_default_6 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_int_3, %copy_3, 0, 3), kwargs = {}) # %select_scatter_default_7 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_5, %select_scatter_default_6, 0, 3), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %select_scatter_default_7), kwargs = {}) # %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul, 2), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_7, None), kwargs = {}) # %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_5, 0.5), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_8, 4), kwargs = {}) triton_per_fused_div_fill_lift_fresh_linalg_vector_norm_mul_3 = 
async_compile.triton('triton_per_fused_div_fill_lift_fresh_linalg_vector_norm_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 1024], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_fill_lift_fresh_linalg_vector_norm_mul_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_fill_lift_fresh_linalg_vector_norm_mul_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 1024 RBLOCK: tl.constexpr = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r3 = rindex % 256 r2 = (rindex // 256) r1 = (rindex // 64) % 4 r4 = rindex tmp0 = tl.load(in_ptr0 + (r3), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (768 + r3), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (r4), None) tmp1 = r2 tmp2 = tl.full([1], 3, tl.int32) tmp3 = tmp1 == tmp2 tmp4 = r1 tmp5 = tmp4 == tmp2 tmp7 = 0.0 tmp8 = tl.where(tmp5, tmp7, tmp6) tmp10 = tl.where(tmp3, tmp8, tmp9) tmp11 = tmp0 * tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = libdevice.sqrt(tmp15) tmp17 = 0.25 tmp18 = tmp16 * tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp18, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [norm, t_feat], Original ATen: [aten.linalg_vector_norm, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_linalg_vector_norm_0.run(arg2_1, buf0, 256, grid=grid(256), stream=stream0) del arg2_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [norm_1, feat], Original ATen: [aten.linalg_vector_norm, aten.div] 
triton_poi_fused_div_linalg_vector_norm_0.run(arg1_1, buf1, 256, grid=grid(256), stream=stream0) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [diff, pow_1, sum_1, add, t_dist, diff_1, pow_2, sum_2, add_1, dist, sub_2], Original ATen: [aten.sub, aten.pow, aten.sum, aten.add, aten.sqrt] triton_poi_fused_add_pow_sqrt_sub_sum_1.run(buf0, buf1, buf2, 256, grid=grid(256), stream=stream0) del buf0 del buf1 buf3 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [eq, same_cam_mask, setitem, setitem_1, setitem_2], Original ATen: [aten.eq, aten._to_copy, aten.lift_fresh, aten.fill] triton_poi_fused__to_copy_eq_fill_lift_fresh_2.run(arg0_1, buf3, 1024, grid=grid(1024), stream=stream0) del arg0_1 buf4 = empty_strided_cuda((), (), torch.float32) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [setitem_3, diff_2, norm_2, mse_loss], Original ATen: [aten.lift_fresh, aten.fill, aten.mul, aten.linalg_vector_norm, aten.div] triton_per_fused_div_fill_lift_fresh_linalg_vector_norm_mul_3.run(buf5, buf2, buf3, 1, 1024, grid=grid(1), stream=stream0) del buf2 del buf3 return (buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class DistillLoss(nn.Module):

    def __init__(self):
        super(DistillLoss, self).__init__()

    def forward(self, t_feat, feat, cams):
        assert len(cams) == feat.shape[0] and feat.shape[0] == t_feat.shape[0]
        t_feat = t_feat / t_feat.norm(p=2, dim=1, keepdim=True)
        t_dist = self.cdist(t_feat, t_feat)
        feat = feat / feat.norm(p=2, dim=1, keepdim=True)
        dist = self.cdist(feat, feat)
        same_cam_mask = torch.eq(cams.unsqueeze(1), cams.unsqueeze(0)).float()
        for i in range(len(same_cam_mask)):
            same_cam_mask[i, i] = 0
        same_cam_mask = same_cam_mask if cams.is_cuda else same_cam_mask
        diff = (t_dist - dist) * same_cam_mask
        mse_loss = torch.norm(diff) / feat.shape[0]
        return mse_loss

    def cdist(self, a, b):
        """
        Returns euclidean distance between (all feature pairs) in a and b

        Args:
            a (2D Tensor): A batch of vectors shaped (B1, D)
            b (2D Tensor): A batch of vectors shaped (B2, D)
        Returns:
            A matrix of all pairwise distance between all vectors in a and b,
            will be shape of (B1, B2)
        """
        diff = a.unsqueeze(1) - b.unsqueeze(0)
        return ((diff ** 2).sum(2) + 1e-12).sqrt()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
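A minimal usage sketch for the DistillLoss module above (not part of the original record; the feature dimension and integer camera labels below are illustrative assumptions following the cdist docstring):

import torch

loss_fn = DistillLoss()
t_feat = torch.rand(4, 128)         # teacher features, shape (B, D) -- hypothetical D
feat = torch.rand(4, 128)           # student features, shape (B, D)
cams = torch.tensor([0, 0, 1, 1])   # assumed camera label per sample
loss = loss_fn(t_feat, feat, cams)  # scalar: camera-masked gap between distance matrices
print(loss.item())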
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = tmp0 / tmp12
    tl.store(out_ptr0 + x3, tmp13, xmask)


@triton.jit
def triton_poi_fused_add_pow_sqrt_sub_sum_1(in_ptr0, in_ptr1, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x2 = xindex // 64
    x1 = xindex // 16 % 4
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask, eviction_policy='evict_last')
    tmp31 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp32 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask, eviction_policy='evict_last')
    tmp36 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp37 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp6 = tmp4 - tmp5
    tmp7 = tmp6 * tmp6
    tmp8 = tmp3 + tmp7
    tmp11 = tmp9 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tmp8 + tmp12
    tmp16 = tmp14 - tmp15
    tmp17 = tmp16 * tmp16
    tmp18 = tmp13 + tmp17
    tmp19 = 1e-12
    tmp20 = tmp18 + tmp19
    tmp21 = libdevice.sqrt(tmp20)
    tmp24 = tmp22 - tmp23
    tmp25 = tmp24 * tmp24
    tmp28 = tmp26 - tmp27
    tmp29 = tmp28 * tmp28
    tmp30 = tmp25 + tmp29
    tmp33 = tmp31 - tmp32
    tmp34 = tmp33 * tmp33
    tmp35 = tmp30 + tmp34
    tmp38 = tmp36 - tmp37
    tmp39 = tmp38 * tmp38
    tmp40 = tmp35 + tmp39
    tmp41 = tmp40 + tmp19
    tmp42 = libdevice.sqrt(tmp41)
    tmp43 = tmp21 - tmp42
    tl.store(out_ptr0 + x3, tmp43, xmask)


@triton.jit
def triton_poi_fused__to_copy_eq_fill_lift_fresh_2(in_ptr0, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex // 256
    x1 = xindex // 64 % 4
    x0 = xindex % 64
    x3 = xindex % 256
    x5 = xindex
    tmp11 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr0 + (64 + x0), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
    tmp31 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp0 = x2
    tmp1 = tl.full([1], 2, tl.int32)
    tmp2 = tmp0 == tmp1
    tmp3 = x1
    tmp4 = tmp3 == tmp1
    tmp5 = tl.full([1], 1, tl.int32)
    tmp6 = tmp1 == tmp5
    tmp7 = tmp3 == tmp5
    tmp8 = tl.full([1], 0, tl.int32)
    tmp9 = tmp5 == tmp8
    tmp10 = tmp3 == tmp8
    tmp13 = tmp11 == tmp12
    tmp14 = tmp13.to(tl.float32)
    tmp15 = 0.0
    tmp16 = tl.where(tmp10, tmp15, tmp14)
    tmp18 = tmp17 == tmp12
    tmp19 = tmp18.to(tl.float32)
    tmp20 = tl.where(tmp9, tmp16, tmp19)
    tmp21 = tl.where(tmp7, tmp15, tmp20)
    tmp22 = tmp1 == tmp8
    tmp24 = tmp23 == tmp12
    tmp25 = tmp24.to(tl.float32)
    tmp26 = tl.where(tmp22, tmp16, tmp25)
    tmp27 = tl.where(tmp6, tmp21, tmp26)
    tmp28 = tl.where(tmp4, tmp15, tmp27)
    tmp29 = tmp0 == tmp5
    tmp30 = tmp0 == tmp8
    tmp32 = tmp31 == tmp12
    tmp33 = tmp32.to(tl.float32)
    tmp34 = tl.where(tmp30, tmp16, tmp33)
    tmp35 = tl.where(tmp29, tmp21, tmp34)
    tmp36 = tl.where(tmp2, tmp28, tmp35)
    tl.store(out_ptr0 + x5, tmp36, xmask)


@triton.jit
def triton_per_fused_div_fill_lift_fresh_linalg_vector_norm_mul_3(in_out_ptr0,
    in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 1024
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r3 = rindex % 256
    r2 = rindex // 256
    r1 = rindex // 64 % 4
    r4 = rindex
    tmp0 = tl.load(in_ptr0 + r3, None, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr1 + (768 + r3), None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr1 + r4, None)
    tmp1 = r2
    tmp2 = tl.full([1], 3, tl.int32)
    tmp3 = tmp1 == tmp2
    tmp4 = r1
    tmp5 = tmp4 == tmp2
    tmp7 = 0.0
    tmp8 = tl.where(tmp5, tmp7, tmp6)
    tmp10 = tl.where(tmp3, tmp8, tmp9)
    tmp11 = tmp0 * tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
    tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
    tmp16 = libdevice.sqrt(tmp15)
    tmp17 = 0.25
    tmp18 = tmp16 * tmp17
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_linalg_vector_norm_0[grid(256)](arg2_1, buf0,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg2_1
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_div_linalg_vector_norm_0[grid(256)](arg1_1, buf1,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg1_1
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_pow_sqrt_sub_sum_1[grid(256)](buf0, buf1,
            buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf0
        del buf1
        buf3 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
            torch.float32)
        triton_poi_fused__to_copy_eq_fill_lift_fresh_2[grid(1024)](arg0_1,
            buf3, 1024, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        buf4 = empty_strided_cuda((), (), torch.float32)
        buf5 = buf4
        del buf4
        triton_per_fused_div_fill_lift_fresh_linalg_vector_norm_mul_3[grid(1)](
            buf5, buf2, buf3, 1, 1024, num_warps=8, num_stages=1)
        del buf2
        del buf3
    return buf5,


class DistillLossNew(nn.Module):

    def __init__(self):
        super(DistillLossNew, self).__init__()

    def cdist(self, a, b):
        """
        Returns euclidean distance between (all feature pairs) in a and b

        Args:
            a (2D Tensor): A batch of vectors shaped (B1, D)
            b (2D Tensor): A batch of vectors shaped (B2, D)
        Returns:
            A matrix of all pairwise distance between all vectors in a and b,
            will be shape of (B1, B2)
        """
        diff = a.unsqueeze(1) - b.unsqueeze(0)
        return ((diff ** 2).sum(2) + 1e-12).sqrt()

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]
Terminator8758/Precise-ICS-master
DistillLoss
false
18,009
[ "MIT" ]
4
9f4591fee6ab64d9dd91f551355d29562bf663cb
https://github.com/Terminator8758/Precise-ICS-master/tree/9f4591fee6ab64d9dd91f551355d29562bf663cb
Coral
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/kz/ckzq3uusvolivjnu5dg3shfyfntwznpi3i7sjbkbdnbf52h65rud.py # Topologically Sorted Source Nodes: [mean, a], Original ATen: [aten.mean, aten.sub] # Source node to ATen node mapping: # a => sub # mean => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [0]), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {}) triton_poi_fused_mean_sub_0 = async_compile.triton('triton_poi_fused_mean_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, 
eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/oy/coydcjeyu4dv35hnoz7wqw62b7v3tl3wk26rq2jxu2uw3vm3u4hj.py # Topologically Sorted Source Nodes: [sub_2, pow_1, sum_1, truediv], Original ATen: [aten.sub, aten.pow, aten.sum, aten.div] # Source node to ATen node mapping: # pow_1 => pow_1 # sub_2 => sub_2 # sum_1 => sum_1 # truediv => div # Graph fragment: # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mm, %mm_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 64), kwargs = {}) triton_per_fused_div_pow_sub_sum_1 = async_compile.triton('triton_per_fused_div_pow_sub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_pow_sub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_pow_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.sum(tmp4, 1)[:, None] tmp7 = 0.015625 tmp8 = tmp6 * tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp8, None) ''', device_str='cuda') async_compile.wait(globals()) 
del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, a], Original ATen: [aten.mean, aten.sub] stream0 = get_raw_stream(0) triton_poi_fused_mean_sub_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [cs], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (4, 4), (1, 4), 0), buf0, out=buf1) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mean_1, b], Original ATen: [aten.mean, aten.sub] triton_poi_fused_mean_sub_0.run(arg1_1, buf2, 16, grid=grid(16), stream=stream0) del arg1_1 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [ct], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf2, (4, 4), (1, 4), 0), buf2, out=buf3) del buf2 buf4 = empty_strided_cuda((), (), torch.float32) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [sub_2, pow_1, sum_1, truediv], Original ATen: [aten.sub, aten.pow, aten.sum, aten.div] triton_per_fused_div_pow_sub_sum_1.run(buf5, buf1, buf3, 1, 16, grid=grid(1), stream=stream0) del buf1 del buf3 return (buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.init


class Coral(nn.Module):

    def __init__(self):
        super(Coral, self).__init__()

    def forward(self, a, b):
        """
        Arguments:
            a: a float tensor with shape [n, d].
            b: a float tensor with shape [m, d].
        Returns:
            a float tensor with shape [].
        """
        d = a.size(1)
        a = a - a.mean(0)
        b = b - b.mean(0)
        cs = torch.matmul(a.t(), a)
        ct = torch.matmul(b.t(), b)
        normalizer = 4 * d * d
        return ((cs - ct) ** 2).sum() / normalizer


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
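An illustrative call for the Coral module above (not part of the original record; shapes are assumptions, and the batch sizes may differ between domains as long as the feature dimension d matches):

import torch

coral = Coral()
source = torch.rand(32, 64)   # [n, d] source-domain features
target = torch.rand(48, 64)   # [m, d] target-domain features
loss = coral(source, target)  # scalar: squared Frobenius gap of scatter matrices / (4 * d^2)
print(loss.item())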
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_mean_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = 4.0
    tmp9 = tmp7 / tmp8
    tmp10 = tmp0 - tmp9
    tl.store(out_ptr0 + x2, tmp10, xmask)


@triton.jit
def triton_per_fused_div_pow_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1,
    xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
    tmp6 = tl.sum(tmp4, 1)[:, None]
    tmp7 = 0.015625
    tmp8 = tmp6 * tmp7
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp8, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mean_sub_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf0, (4, 4), (1, 4), 0),
            buf0, out=buf1)
        buf2 = buf0
        del buf0
        triton_poi_fused_mean_sub_0[grid(16)](arg1_1, buf2, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del arg1_1
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf2, (4, 4), (1, 4), 0),
            buf2, out=buf3)
        del buf2
        buf4 = empty_strided_cuda((), (), torch.float32)
        buf5 = buf4
        del buf4
        triton_per_fused_div_pow_sub_sum_1[grid(1)](buf5, buf1, buf3, 1,
            16, XBLOCK=1, num_warps=2, num_stages=1)
        del buf1
        del buf3
    return buf5,


class CoralNew(nn.Module):

    def __init__(self):
        super(CoralNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
TropComplique/associative-domain-adaptation
Coral
false
18,010
[ "MIT" ]
8
a2ec0a9e678af20624f79e40c8042c969a69e8f3
https://github.com/TropComplique/associative-domain-adaptation/tree/a2ec0a9e678af20624f79e40c8042c969a69e8f3
TotalVariationLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/fk/cfkvfd2icmgmhqhey3w2ukjzvzi5eaaradubgrkt63ztysthps2j.py # Topologically Sorted Source Nodes: [sub, h_tv, mean, sub_1, w_tv, mean_1, add], Original ATen: [aten.sub, aten.pow, aten.mean, aten.add] # Source node to ATen node mapping: # add => add # h_tv => pow_1 # mean => mean # mean_1 => mean_1 # sub => sub # sub_1 => sub_1 # w_tv => pow_2 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_3, %slice_7), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [0, 1, 2, 3]), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_12, %slice_16), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_2, [0, 1, 2, 3]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %mean_1), kwargs = {}) triton_per_fused_add_mean_pow_sub_0 = async_compile.triton('triton_per_fused_add_mean_pow_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 
'num_load': 4, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mean_pow_sub_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 192 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r0 = rindex % 12 r1 = (rindex // 12) r2 = rindex % 3 r3 = (rindex // 3) tmp0 = tl.load(in_ptr0 + (4 + r0 + (16*r1)), rmask, other=0.0) tmp1 = tl.load(in_ptr0 + (r0 + (16*r1)), rmask, other=0.0) tmp8 = tl.load(in_ptr0 + (1 + r2 + (4*r3)), rmask, other=0.0) tmp9 = tl.load(in_ptr0 + (r2 + (4*r3)), rmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp10 = tmp8 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.where(rmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tmp16 = 192.0 tmp17 = tmp7 / tmp16 tmp18 = tmp15 / tmp16 tmp19 = tmp17 + tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp19, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub, h_tv, mean, sub_1, w_tv, mean_1, add], Original ATen: [aten.sub, aten.pow, aten.mean, aten.add] stream0 = get_raw_stream(0) triton_per_fused_add_mean_pow_sub_0.run(buf2, arg0_1, 1, 192, grid=grid(1), stream=stream0) del arg0_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class TotalVariationLoss(nn.Module):

    def __init__(self):
        super(TotalVariationLoss, self).__init__()

    def forward(self, x):
        """
        Arguments:
            x: a float tensor with shape [b, 3, h, w].
                It represents a RGB image with pixel values in [0, 1] range.
        Returns:
            a float tensor with shape [].
        """
        h, w = x.size()[2:]
        h_tv = torch.pow(x[:, :, 1:, :] - x[:, :, :h - 1, :], 2)
        w_tv = torch.pow(x[:, :, :, 1:] - x[:, :, :, :w - 1], 2)
        return h_tv.mean([0, 1, 2, 3]) + w_tv.mean([0, 1, 2, 3])


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
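An illustrative call for TotalVariationLoss above (not part of the original record; the batch shape is an assumption, and any b, h, w work since the loss averages over all elements):

import torch

tv = TotalVariationLoss()
images = torch.rand(2, 3, 64, 64)  # [b, 3, h, w], pixel values in [0, 1]
loss = tv(images)                  # scalar: mean squared difference of vertical plus horizontal neighbors
print(loss.item())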
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_mean_pow_sub_0(in_out_ptr0, in_ptr0, xnumel,
    rnumel, XBLOCK: tl.constexpr):
    rnumel = 192
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    rmask = rindex < rnumel
    r0 = rindex % 12
    r1 = rindex // 12
    r2 = rindex % 3
    r3 = rindex // 3
    tmp0 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0)
    tmp1 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0)
    tmp8 = tl.load(in_ptr0 + (1 + r2 + 4 * r3), rmask, other=0.0)
    tmp9 = tl.load(in_ptr0 + (r2 + 4 * r3), rmask, other=0.0)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
    tmp6 = tl.where(rmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp10 = tmp8 - tmp9
    tmp11 = tmp10 * tmp10
    tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
    tmp14 = tl.where(rmask, tmp12, 0)
    tmp15 = tl.sum(tmp14, 1)[:, None]
    tmp16 = 192.0
    tmp17 = tmp7 / tmp16
    tmp18 = tmp15 / tmp16
    tmp19 = tmp17 + tmp18
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp19, None)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_add_mean_pow_sub_0[grid(1)](buf2, arg0_1, 1, 192,
            XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
    return buf2,


class TotalVariationLossNew(nn.Module):

    def __init__(self):
        super(TotalVariationLossNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
TropComplique/CNNMRF
TotalVariationLoss
false
18,011
[ "MIT" ]
3
602f861b14ed240acac89e6502e69f797d4f4a49
https://github.com/TropComplique/CNNMRF/tree/602f861b14ed240acac89e6502e69f797d4f4a49
LayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/tq/ctqlhivo3mo6eig4tnbu2s2tyo4r76vmhmtnuan7sou6pqgmtpjc.py # Topologically Sorted Source Nodes: [mean, std, sub, add, x, mul, x_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.add, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # mean => mean # mul => mul # std => var # sub => sub # x => div # x_1 => add_1 # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [1]), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%view, [1]), kwargs = {correction: 1.0}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %view_1), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, 1e-05), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %view_4), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %view_5), kwargs = {}) triton_per_fused_add_div_mean_mul_std_sub_0 = async_compile.triton('triton_per_fused_add_div_mean_mul_std_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_std_sub_0', 
'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_mean_mul_std_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp28 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 64, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp1 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 64.0 tmp20 = tmp4 / tmp19 tmp21 = 63.0 tmp22 = tmp18 / tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tmp26 = tmp0 - tmp20 tmp27 = tmp26 / tmp25 tmp29 = tmp27 * tmp28 tmp31 = tmp29 + tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp20, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + (x0), tmp25, xmask) tl.store(out_ptr0 + (r1 + (64*x0)), tmp31, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, ), (1, ), torch.float32) buf3 = empty_strided_cuda((4, ), (1, ), torch.float32) buf1 = buf0; del buf0 # reuse buf5 = reinterpret_tensor(buf3, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf3 # reuse buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, std, sub, add, x, mul, x_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.add, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_add_div_mean_mul_std_sub_0.run(buf1, buf5, primals_1, primals_2, primals_3, buf6, 4, 64, grid=grid(4), stream=stream0) del primals_2 del primals_3 return (buf6, primals_1, reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0), buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) 
fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-05, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_()) self.beta = nn.Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_features': 4}]
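A note on the arithmetic the fused kernel above reproduces for this (4, 4, 4, 4) input: each of the 4 samples has 64 elements, std(1) uses Bessel's correction (the kernel's divisor 63.0), and eps is added to the standard deviation rather than to the variance as nn.LayerNorm would. A minimal eager sketch of the same statistics, with gamma and beta standing in for the module's parameters:

import torch

x = torch.rand(4, 4, 4, 4)
flat = x.view(x.size(0), -1)                  # 64 elements per sample
mean = flat.mean(1).view(-1, 1, 1, 1)         # kernel: tmp20 = sum / 64.0
std = flat.std(1).view(-1, 1, 1, 1)           # kernel: sqrt(sum((x - mean)**2) / 63.0)
gamma, beta = torch.rand(4), torch.zeros(4)   # stand-ins for self.gamma / self.beta
y = (x - mean) / (std + 1e-05) * gamma.view(1, -1, 1, 1) + beta.view(1, -1, 1, 1)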
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_div_mean_mul_std_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp28 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 64, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp1 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 64.0 tmp20 = tmp4 / tmp19 tmp21 = 63.0 tmp22 = tmp18 / tmp21 tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tmp26 = tmp0 - tmp20 tmp27 = tmp26 / tmp25 tmp29 = tmp27 * tmp28 tmp31 = tmp29 + tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp20, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp25, xmask) tl.store(out_ptr0 + (r1 + 64 * x0), tmp31, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) buf3 = empty_strided_cuda((4,), (1,), torch.float32) buf1 = buf0 del buf0 buf5 = reinterpret_tensor(buf3, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf3 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_div_mean_mul_std_sub_0[grid(4)](buf1, buf5, primals_1, primals_2, primals_3, buf6, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_2 del primals_3 return buf6, primals_1, reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0), buf5 class LayerNormNew(nn.Module): def __init__(self, num_features, eps=1e-05, affine=True): super(LayerNormNew, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_()) self.beta = nn.Parameter(torch.zeros(num_features)) def forward(self, input_0): primals_2 = self.gamma primals_3 = self.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
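A quick sanity check of the generated wrapper against the eager module, as a sketch (assumes a CUDA device, since call() pins all buffers to cuda:0, and assumes both classes above are in scope):

import torch

eager = LayerNorm(4).cuda()
fused = LayerNormNew(4).cuda()
fused.gamma.data.copy_(eager.gamma.data)   # gamma is randomly initialized, so sync it
fused.beta.data.copy_(eager.beta.data)
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(eager(x), fused(x))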
ToniChopp/MIRACLE-Paper-Sharing-Album
LayerNorm
false
18,012
[ "MIT" ]
7
72a3843101483fc8b53df2746c488da066eda2a1
https://github.com/ToniChopp/MIRACLE-Paper-Sharing-Album/tree/72a3843101483fc8b53df2746c488da066eda2a1
DistillKL
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/n7/cn7we7togo35ouvrhrbnvn2aajw35y3b2luwnik3fez2ulvmsdeq.py # Topologically Sorted Source Nodes: [p_t], Original ATen: [aten._softmax] # Source node to ATen node mapping: # p_t => exp_1 # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 4), kwargs = {}) # %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + (x3), tmp17, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/cz/cczsklj2ifcf4srpukaggn7ow5rwycvcqopo7gytt72m4xivnomp.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1), kwargs = {}) # %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [1], True), kwargs = {}) # %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {}) # %div_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 4), kwargs = {}) triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), 
xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tl.store(out_ptr0 + (x3), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/hy/chylpig7m2g3kwbfm2ve2whsadrxilrpzlu5sd4o6cfxdcnsqh5t.py # Topologically Sorted Source Nodes: [p_t, kl_div, p_s, mul, loss], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.sum, aten.div] # Source node to ATen node mapping: # kl_div => eq, full_default, full_default_1, isnan, log_1, mul, mul_1, sub_3, sum_3, where, where_1 # loss => div_3 # mul => mul_2 # p_s => exp, log, sub_1, sum_1 # p_t => div_2, sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [1], True), kwargs = {}) # %div_2 : [num_users=5] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {}) # %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%div_2,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], nan), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%div_2, 0), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div_2,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, %log_1), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul_1), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default_1, %where), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_1, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, %sub_1), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %mul), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_3,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_3, 16), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_2, 4), kwargs = {}) triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2 = 
async_compile.triton('triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 10, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = (rindex // 64) tmp0 = tl.load(in_ptr0 + (r3), None) tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (r3), None) tmp18 = tl.load(in_ptr1 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr1 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float("nan") tmp16 = tl.where(tmp9, tmp15, tmp14) tmp19 = tl_math.exp(tmp18) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tmp31 = tmp8 * tmp30 tmp32 = tmp16 - tmp31 tmp33 = tl.broadcast_to(tmp32, [RBLOCK]) tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0)) tmp36 = 16.0 tmp37 = tmp35 * tmp36 tmp38 = 0.25 tmp39 = tmp37 * tmp38 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, 
tl.int32)), tmp39, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [p_t], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(arg0_1, buf2, 256, grid=grid(256), stream=stream0) del arg0_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [p_t, kl_div, p_s, mul, loss], Original ATen: [aten._softmax, aten.xlogy, aten._log_softmax, aten.mul, aten.sub, aten.sum, aten.div] triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2.run(buf4, buf0, buf2, 1, 256, grid=grid(1), stream=stream0) del buf0 del buf2 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed class DistillKL(nn.Module): """Distilling the Knowledge in a Neural Network""" def __init__(self, T): super(DistillKL, self).__init__() self.T = T def forward(self, y_s, y_t): p_s = F.log_softmax(y_s / self.T, dim=1) p_t = F.softmax(y_t / self.T, dim=1) loss = F.kl_div(p_s, p_t, reduction='sum') * self.T ** 2 / y_s.shape[0] return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'T': 4}]
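The reduction kernel above evaluates the KL term in the xlogy form that F.kl_div lowers to, with the temperature and batch size baked in as constants: the 0.25 inside the softmax kernels is 1/T, and the trailing * 16.0 * 0.25 is T**2 / batch for T = 4 and a batch of 4. A sketch of that decomposition in eager PyTorch:

import torch
import torch.nn.functional as F

T = 4
y_s, y_t = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
p_t = F.softmax(y_t / T, dim=1)
log_p_s = F.log_softmax(y_s / T, dim=1)
kl = (torch.xlogy(p_t, p_t) - p_t * log_p_s).sum()   # the kernel's tmp16 - tmp31, summed
loss = kl * T ** 2 / y_s.shape[0]                    # the kernel's * 16.0 * 0.25
ref = F.kl_div(log_p_s, p_t, reduction='sum') * T ** 2 / y_s.shape[0]
torch.testing.assert_close(loss, ref)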
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tl.store(out_ptr0 + x3, tmp17, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = 0.25 tmp16 = tmp14 * tmp15 tl.store(out_ptr0 + x3, tmp16, xmask) @triton.jit def triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr1 + r3, None) tmp18 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp20 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp26 = 
tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tmp9 = libdevice.isnan(tmp8).to(tl.int1) tmp10 = 0.0 tmp11 = tmp8 == tmp10 tmp12 = tl_math.log(tmp8) tmp13 = tmp8 * tmp12 tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = float('nan') tmp16 = tl.where(tmp9, tmp15, tmp14) tmp19 = tl_math.exp(tmp18) tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tl_math.log(tmp28) tmp30 = tmp17 - tmp29 tmp31 = tmp8 * tmp30 tmp32 = tmp16 - tmp31 tmp33 = tl.broadcast_to(tmp32, [RBLOCK]) tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0)) tmp36 = 16.0 tmp37 = tmp35 * tmp36 tmp38 = 0.25 tmp39 = tmp37 * tmp38 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp39, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](arg0_1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused__log_softmax__softmax_div_mul_sub_sum_xlogy_2[grid(1) ](buf4, buf0, buf2, 1, 256, num_warps=2, num_stages=1) del buf0 del buf2 return buf4, class DistillKLNew(nn.Module): """Distilling the Knowledge in a Neural Network""" def __init__(self, T): super(DistillKLNew, self).__init__() self.T = T def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
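A usage sketch pairing the eager and generated criteria (assumes a CUDA device and the two classes above in scope). One caveat worth stating: DistillKLNew still accepts T, but the kernels have T = 4 and batch size 4 baked into their 0.25 and 16.0 constants, so only those values reproduce the eager loss.

import torch

crit_eager = DistillKL(T=4)
crit_fused = DistillKLNew(T=4)
y_s = torch.rand(4, 4, 4, 4, device='cuda')
y_t = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(crit_eager(y_s, y_t), crit_fused(y_s, y_t))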
ToniChopp/MIRACLE-Paper-Sharing-Album
DistillKL
false
18,013
[ "MIT" ]
7
72a3843101483fc8b53df2746c488da066eda2a1
https://github.com/ToniChopp/MIRACLE-Paper-Sharing-Album/tree/72a3843101483fc8b53df2746c488da066eda2a1
TwoLinearsModel
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/jj/cjjx7o2lisvy23acfxxg2wqaj7ti234n4nm74lhh5jr3e24mcozm.py # Topologically Sorted Source Nodes: [h_relu], Original ATen: [aten.clamp, aten.ge] # Source node to ATen node mapping: # h_relu => clamp_min # Graph fragment: # %add_tensor : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {}) # %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_tensor, 0), kwargs = {}) # %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add_tensor, 0), kwargs = {}) triton_poi_fused_clamp_ge_0 = async_compile.triton('triton_poi_fused_clamp_ge_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_ge_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_ge_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp2 >= tmp3 tl.store(out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr1 + (x2), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 64), (64, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), reinterpret_tensor(primals_2, (64, 4), (1, 64), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [h_relu], Original ATen: [aten.clamp, aten.ge] stream0 = get_raw_stream(0) triton_poi_fused_clamp_ge_0.run(buf0, primals_3, buf1, buf3, 16, grid=grid(16), stream=stream0) del primals_3 buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [y_pred], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return (buf2, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0), buf1, primals_4, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn import torch.utils.data import torch.utils.tensorboard._pytorch_graph import torch.onnx.symbolic_caffe2 class TwoLinearsModel(nn.Module): def __init__(self, per_sample_shape: 'list', hidden_size: 'int', output_size: 'int'): super(TwoLinearsModel, self).__init__() assert len(per_sample_shape) == 3 self.per_sample_shape = per_sample_shape input_size = per_sample_shape[0] for dim in per_sample_shape[1:]: input_size *= dim self.linear1 = nn.Linear(input_size, hidden_size) self.linear2 = nn.Linear(hidden_size, output_size) def forward(self, x: 'torch.Tensor'): batch_size = x.size(0) x = x.view(batch_size, -1) h_relu = self.linear1(x).clamp(min=0) y_pred = self.linear2(h_relu) return y_pred def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'per_sample_shape': [4, 4, 4], 'hidden_size': 4, 'output_size': 4}]
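clamp(min=0) is exactly ReLU, and because this is a '0_forward' graph the fused kernel materializes two outputs: the clamped activation and the boolean mask h >= 0 (out_ptr1 / buf3) that autograd needs to backpropagate through clamp. A minimal eager sketch of both:

import torch

h = torch.randn(4, 4)          # linear1 pre-activation, i.e. x @ W1.T + b1
h_relu = h.clamp(min=0)        # out_ptr0 in triton_poi_fused_clamp_ge_0
mask = h >= 0                  # out_ptr1, saved for the backward pass
assert torch.equal(h_relu, torch.relu(h))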
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn import torch.utils.data import torch.utils.tensorboard._pytorch_graph import torch.onnx.symbolic_caffe2 assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_ge_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp2 >= tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp5, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 64), (64, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0 ), reinterpret_tensor(primals_2, (64, 4), (1, 64), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_clamp_ge_0[grid(16)](buf0, primals_3, buf1, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf2 = buf0 del buf0 extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return buf2, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0 ), buf1, primals_4, buf3 class TwoLinearsModelNew(nn.Module): def __init__(self, per_sample_shape: 'list', hidden_size: 'int', output_size: 'int'): super(TwoLinearsModelNew, self).__init__() assert len(per_sample_shape) == 3 self.per_sample_shape = per_sample_shape input_size = per_sample_shape[0] for dim in per_sample_shape[1:]: input_size *= dim self.linear1 = nn.Linear(input_size, hidden_size) self.linear2 = nn.Linear(hidden_size, output_size) def forward(self, input_0): primals_2 = self.linear1.weight primals_3 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
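How the wrapper splits the work, restated with hypothetical weights (shapes follow the assert_size_stride guards): the input is viewed as (4, 64), linear1's matmul runs as an external mm, its bias-add is fused with the clamp in the Triton kernel, and linear2 collapses into a single addmm.

import torch

x = torch.rand(4, 4, 4, 4).view(4, 64)      # reinterpret_tensor(primals_1, (4, 64), (64, 1), 0)
W1, b1 = torch.rand(4, 64), torch.rand(4)   # hypothetical linear1 parameters
W2, b2 = torch.rand(4, 4), torch.rand(4)    # hypothetical linear2 parameters
h = torch.mm(x, W1.t())                     # extern_kernels.mm
h_relu = (h + b1).clamp(min=0)              # fused bias + clamp (+ mask) kernel
y = torch.addmm(b2, h_relu, W2.t())         # extern_kernels.addmm with alpha=1, beta=1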
Rohan-Chaudhury/aimet
TwoLinearsModel
false
18,014
[ "BSD-3-Clause" ]
3
1c38cac8cc0fd32dca40ce5e39940805d29f7a4a
https://github.com/Rohan-Chaudhury/aimet/tree/1c38cac8cc0fd32dca40ce5e39940805d29f7a4a
PrefModel
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/kg/ckgcra3mo7gcfpvj7scgza5or7qpmrbh64r75mrpkmd6dik5bpmq.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten._softmax] # Source node to ATen node mapping: # out => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex 
x0 = xindex % 8 x2 = (xindex // 32) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (32*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (8 + x0 + (32*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (16 + x0 + (32*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (24 + x0 + (32*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/vm/cvmula7ydwt7nbmqfwymglofxffgvonwffq5bcreuuk4qmdk4ic4.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten._softmax] # Source node to ATen node mapping: # out => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 8 x2 = (xindex // 32) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (32*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (8 + x0 + (32*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (16 + x0 + (32*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (24 + x0 + (32*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (2, 4), (4, 1)) assert_size_stride(primals_2, (2, ), (1, )) 
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [h], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf0, buf1, 128, grid=grid(128), stream=stream0) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 2), (32, 8, 2, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 128, grid=grid(128), stream=stream0) del buf1 return (buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class PrefModel(nn.Module): def __init__(self, input_dim): super(PrefModel, self).__init__() self.combination = nn.Linear(input_dim, 2) self.softmax = nn.Softmax(1) def forward(self, features): h = self.combination(features) out = self.softmax(h) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4}]
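The two pointwise kernels are the numerically stable softmax over dim 1, done in two passes: subtract the per-position max and exponentiate, then normalize by the sum. The % 8 and // 32 index arithmetic reflects the (4, 4, 4, 2) output with strides (32, 8, 2, 1), where dim 1 advances in steps of 8. An eager sketch:

import torch

h = torch.rand(4, 4, 4, 2)                    # the addmm result, reshaped
e = (h - h.amax(dim=1, keepdim=True)).exp()   # triton_poi_fused__softmax_0
out = e / e.sum(dim=1, keepdim=True)          # triton_poi_fused__softmax_1
torch.testing.assert_close(out, torch.softmax(h, dim=1))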
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 8 x2 = xindex // 32 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (8 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (24 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 8 x2 = xindex // 32 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (8 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (24 + x0 + 32 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (2, 4), (4, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(128)](buf0, buf1, 128, XBLOCK=128, num_warps=4, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 2), (32, 8, 2, 1), 0) del buf0 triton_poi_fused__softmax_1[grid(128)](buf1, buf2, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf1 return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2 class PrefModelNew(nn.Module): def __init__(self, input_dim): super(PrefModelNew, self).__init__() self.combination = nn.Linear(input_dim, 2) self.softmax = nn.Softmax(1) def forward(self, input_0): primals_1 = self.combination.weight primals_2 = self.combination.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
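The wrapper also illustrates Inductor's buffer reuse: once the exponentials land in buf1, the 64x2 addmm output buf0 is reinterpreted as the (4, 4, 4, 2) destination of the normalization kernel, so the forward needs no third temporary. In eager terms the reinterpretation is just a view:

import torch

buf0 = torch.rand(64, 2)        # contiguous addmm output, strides (2, 1)
buf2 = buf0.view(4, 4, 4, 2)    # reinterpret_tensor(buf0, (4, 4, 4, 2), (32, 8, 2, 1), 0)
assert buf2.stride() == (32, 8, 2, 1)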
UKPLab/ijcai2019-relis
PrefModel
false
18,015
[ "MIT" ]
5
8a40762dcfa90c075a4f6591cbdceb468026ef17
https://github.com/UKPLab/ijcai2019-relis/tree/8a40762dcfa90c075a4f6591cbdceb468026ef17
TinyConvNet2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/ar/car7yauiypuyt2aup7rqtsj4glkttpfh7y4zxutghlnunswymzjp.py # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # input_1 => convolution # input_2 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/cl/ccloe2g3pxyuzribb4x3y7k5u47ap7xk4njibghnoafatd7yw3lj.py # Topologically Sorted Source Nodes: [input_3, input_4], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # input_3 => convolution_1 # input_4 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/52/c52v4zgfjclcndc4jaefeddyp5ocxegxa5cr5bkyv6tofbgpzoyx.py # Topologically Sorted Source Nodes: [input_5, input_6], Original ATen: [aten.convolution, aten.sigmoid] # Source node to ATen node mapping: # input_5 => convolution_2 # input_6 => sigmoid # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # 
%sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_sigmoid_2 = async_compile.triton('triton_poi_fused_convolution_sigmoid_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + (x0), tmp4, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (16, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (64, 16, 1, 1), (16, 1, 1, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (1, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_7, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 262144, grid=grid(262144), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [input_3, input_4], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 1048576, grid=grid(1048576), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [input_5, input_6], Original ATen: [aten.convolution, aten.sigmoid] triton_poi_fused_convolution_sigmoid_2.run(buf5, primals_7, 16384, grid=grid(16384), stream=stream0) del primals_7 return (buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((16, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 16, 1, 1), (16, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch


class TinyConvNet2d(torch.nn.Module):

    def __init__(self, in_channels=1, out_channels=1):
        super().__init__()
        self.conv1 = torch.nn.Conv2d(in_channels, 16, 1)
        self.nlin1 = torch.nn.ReLU()
        self.conv2 = torch.nn.Conv2d(16, 64, 1)
        self.nlin2 = torch.nn.ReLU()
        self.conv3 = torch.nn.Conv2d(64, out_channels, 1)
        self.nlin3 = torch.nn.Sigmoid()

    def forward(self, x):
        return torch.nn.Sequential(self.conv1, self.nlin1, self.conv2,
            self.nlin2, self.conv3, self.nlin3)(x)


def get_inputs():
    return [torch.rand([4, 1, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 16 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (16, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (64, 16, 1, 1), (16, 1, 1, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (1, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 64, 64), (65536, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(262144)](buf1, primals_2, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(1048576)](buf3, primals_5, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_sigmoid_2[grid(16384)](buf5, primals_7, 16384, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 return buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf5 class 
TinyConvNet2dNew(torch.nn.Module): def __init__(self, in_channels=1, out_channels=1): super().__init__() self.conv1 = torch.nn.Conv2d(in_channels, 16, 1) self.nlin1 = torch.nn.ReLU() self.conv2 = torch.nn.Conv2d(16, 64, 1) self.nlin2 = torch.nn.ReLU() self.conv3 = torch.nn.Conv2d(64, out_channels, 1) self.nlin3 = torch.nn.Sigmoid() def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
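Since the generated wrapper above is an Inductor compilation of the eager module, the two paths can be cross-checked directly. A minimal sketch, assuming a CUDA device and a torch build with Inductor; it reuses the TinyConvNet2d class from the listing above, and torch.compile here is a stand-in for however the generated code was originally produced:

import torch

m = TinyConvNet2d().cuda().eval()
x = torch.rand(4, 1, 64, 64, device='cuda')
with torch.no_grad():
    ref = m(x)                   # eager forward
    out = torch.compile(m)(x)    # Inductor path, analogous to the generated code above
torch.testing.assert_close(out, ref)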
Tomaz-Vieira/tiktorch
TinyConvNet2d
false
18,016
[ "MIT" ]
8
2d6803c4ba5e26e4b27bf8af6638040fa4fc5628
https://github.com/Tomaz-Vieira/tiktorch/tree/2d6803c4ba5e26e4b27bf8af6638040fa4fc5628
TVLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/4t/c4ta4js6r2eukfzr4pnp2aomwbqqy6vd7s3gvzjy5feoeuuanf6t.py # Topologically Sorted Source Nodes: [sub, pow_1, h_tv, sub_1, pow_2, w_tv, add, truediv], Original ATen: [aten.sub, aten.pow, aten.sum, aten.add, aten.div] # Source node to ATen node mapping: # add => add # h_tv => sum_1 # pow_1 => pow_1 # pow_2 => pow_2 # sub => sub # sub_1 => sub_1 # truediv => div # w_tv => sum_2 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_3, %slice_7), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_12, %slice_16), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_2,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %sum_2), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, 256), kwargs = {}) triton_per_fused_add_div_pow_sub_sum_0 = async_compile.triton('triton_per_fused_add_div_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 192 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r0 = rindex % 12 r1 = (rindex // 12) r2 = rindex % 3 r3 = (rindex // 3) tmp0 = tl.load(in_ptr0 + (4 + r0 + (16*r1)), rmask, other=0.0) tmp1 = tl.load(in_ptr0 + (r0 + (16*r1)), rmask, other=0.0) tmp8 = tl.load(in_ptr0 + (1 + r2 + (4*r3)), rmask, other=0.0) tmp9 = tl.load(in_ptr0 + (r2 + (4*r3)), rmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp10 = tmp8 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.where(rmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tmp16 = tmp7 + tmp15 tmp17 = 0.00390625 tmp18 = tmp16 * tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp18, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub, pow_1, h_tv, sub_1, pow_2, w_tv, add, truediv], Original ATen: [aten.sub, aten.pow, aten.sum, aten.add, aten.div] stream0 = get_raw_stream(0) triton_per_fused_add_div_pow_sub_sum_0.run(buf2, arg0_1, 1, 192, grid=grid(1), stream=stream0) del arg0_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.init


class TVLoss(nn.Module):

    def __init__(self):
        super(TVLoss, self).__init__()

    def forward(self, x):
        """
        Arguments:
            x: a float tensor with shape [b, 3, h, w].
            It represents an RGB image with pixel values in the [0, 1] range.
        Returns:
            a float tensor with shape [].
        """
        b, c, h, w = x.size()
        h_tv = torch.pow(x[:, :, 1:, :] - x[:, :, :h - 1, :], 2).sum()
        w_tv = torch.pow(x[:, :, :, 1:] - x[:, :, :, :w - 1], 2).sum()
        return (h_tv + w_tv) / (b * c * h * w)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_pow_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): rnumel = 192 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r0 = rindex % 12 r1 = rindex // 12 r2 = rindex % 3 r3 = rindex // 3 tmp0 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0) tmp1 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0) tmp8 = tl.load(in_ptr0 + (1 + r2 + 4 * r3), rmask, other=0.0) tmp9 = tl.load(in_ptr0 + (r2 + 4 * r3), rmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(rmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp10 = tmp8 - tmp9 tmp11 = tmp10 * tmp10 tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.where(rmask, tmp12, 0) tmp15 = tl.sum(tmp14, 1)[:, None] tmp16 = tmp7 + tmp15 tmp17 = 0.00390625 tmp18 = tmp16 * tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp18, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_pow_sub_sum_0[grid(1)](buf2, arg0_1, 1, 192, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf2, class TVLossNew(nn.Module): def __init__(self): super(TVLossNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
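The constant 0.00390625 baked into the fused kernel is 1/256 = 1/(b*c*h*w) for the 4x4x4x4 test input, and rnumel=192 is the element count of each difference tensor (4*4*3*4). A minimal eager-mode sketch of the same arithmetic, CPU only, reusing the TVLoss class from the listing above:

import torch

x = torch.rand(4, 4, 4, 4)
h_tv = (x[:, :, 1:, :] - x[:, :, :-1, :]).pow(2).sum()
w_tv = (x[:, :, :, 1:] - x[:, :, :, :-1]).pow(2).sum()
ref = (h_tv + w_tv) / 256  # 1/256 == 0.00390625, the factor hard-coded in the kernel
torch.testing.assert_close(TVLoss()(x), ref)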
TropComplique/WESPE
TVLoss
false
18,017
[ "MIT" ]
5
84738f1ed802a3f6a4a0549677d8137997fac617
https://github.com/TropComplique/WESPE/tree/84738f1ed802a3f6a4a0549677d8137997fac617
Grayscale
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/xs/cxsid2epjncxcblfg5tq2wlorxvdiv2mkbnrayvpswbwmqcr4n57.py # Topologically Sorted Source Nodes: [mul, mul_1, add, mul_2, result], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # add => add # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # result => add_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, 0.299), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_1, 0.587), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_2, 0.114), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %mul_2), kwargs = {}) triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp7 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp1 = 0.299 tmp2 = tmp0 * tmp1 tmp4 = 0.587 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp8 = 0.114 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, mul_1, add, mul_2, result], Original ATen: [aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.init


class Grayscale(nn.Module):

    def __init__(self):
        super(Grayscale, self).__init__()

    def forward(self, x):
        """
        Arguments:
            x: a float tensor with shape [b, 3, h, w].
            It represents an RGB image with pixel values in the [0, 1] range.
        Returns:
            a float tensor with shape [b, 1, h, w].
        """
        result = 0.299 * x[:, 0] + 0.587 * x[:, 1] + 0.114 * x[:, 2]
        return result.unsqueeze(1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp7 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp1 = 0.299 tmp2 = tmp0 * tmp1 tmp4 = 0.587 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp8 = 0.114 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 1, 4, 4), (16, 16, 4, 1), 0), class GrayscaleNew(nn.Module): def __init__(self): super(GrayscaleNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
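The weights 0.299/0.587/0.114 are the ITU-R BT.601 luma coefficients; the kernel fuses the three multiplies and two adds into one pass per pixel. A minimal eager sketch of the same reduction, CPU only, reusing the Grayscale class from the listing above:

import torch

w = torch.tensor([0.299, 0.587, 0.114])  # BT.601 luma weights
x = torch.rand(2, 3, 8, 8)
ref = (x * w.view(1, 3, 1, 1)).sum(dim=1, keepdim=True)
torch.testing.assert_close(Grayscale()(x), ref)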
TropComplique/WESPE
Grayscale
false
18,018
[ "MIT" ]
5
84738f1ed802a3f6a4a0549677d8137997fac617
https://github.com/TropComplique/WESPE/tree/84738f1ed802a3f6a4a0549677d8137997fac617
TinyConvNet3d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/f2/cf24thyu72gqhyytu5j6upxmdx23qzwqpalg75ehihrniihzx6xk.py # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # input_1 => convolution # input_2 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1, 1], [0, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16777216], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16777216 xoffset = tl.program_id(0) * 
XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 262144) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/4p/c4p7wasrlbxbti7wzau6kxf4i2qd6b2uxjbnaux2jlytbomoh24l.py # Topologically Sorted Source Nodes: [input_3, input_4], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # input_3 => convolution_1 # input_4 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1, 1], [0, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[67108864], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 67108864 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 262144) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/dl/cdlvjkq4abgu5nr5anvdvzfcynaifad5hkrrb4bzjsxi224wemov.py # Topologically Sorted Source Nodes: [input_5, input_6], Original ATen: [aten.convolution, aten.sigmoid] # Source node to ATen node mapping: # input_5 => convolution_2 # input_6 => sigmoid # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1, 1], [0, 0, 0], 
[1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_sigmoid_2 = async_compile.triton('triton_poi_fused_convolution_sigmoid_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + (x0), tmp4, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (16, 1, 1, 1, 1), (1, 1, 1, 1, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1)) assert_size_stride(primals_4, (64, 16, 1, 1, 1), (16, 1, 1, 1, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (1, 64, 1, 1, 1), (64, 1, 1, 1, 1)) assert_size_stride(primals_7, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 64, 64, 64), (4194304, 262144, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 16777216, grid=grid(16777216), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1, 
1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64, 64), (16777216, 262144, 4096, 64, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [input_3, input_4], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 67108864, grid=grid(67108864), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [input_5, input_6], Original ATen: [aten.convolution, aten.sigmoid] triton_poi_fused_convolution_sigmoid_2.run(buf5, primals_7, 1048576, grid=grid(1048576), stream=stream0) del primals_7 return (buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((16, 1, 1, 1, 1), (1, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 16, 1, 1, 1), (16, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 64, 1, 1, 1), (64, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch


class TinyConvNet3d(torch.nn.Module):

    def __init__(self, in_channels=1, out_channels=1):
        super().__init__()
        self.conv1 = torch.nn.Conv3d(in_channels, 16, 1)
        self.nlin1 = torch.nn.ReLU()
        self.conv2 = torch.nn.Conv3d(16, 64, 1)
        self.nlin2 = torch.nn.ReLU()
        self.conv3 = torch.nn.Conv3d(64, out_channels, 1)
        self.nlin3 = torch.nn.Sigmoid()

    def forward(self, x):
        return torch.nn.Sequential(self.conv1, self.nlin1, self.conv2,
            self.nlin2, self.conv3, self.nlin3)(x)


def get_inputs():
    return [torch.rand([4, 1, 64, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 262144 % 16 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 262144 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (16, 1, 1, 1, 1), (1, 1, 1, 1, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1)) assert_size_stride(primals_4, (64, 16, 1, 1, 1), (16, 1, 1, 1, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (1, 64, 1, 1, 1), (64, 1, 1, 1, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 16, 64, 64, 64), (4194304, 262144, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(16777216)](buf1, primals_2, 16777216, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64, 64), (16777216, 262144, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(67108864)](buf3, primals_5, 67108864, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 1, 64, 64, 64), (262144, 262144, 4096, 64, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_sigmoid_2[grid(1048576)](buf5, primals_7, 1048576, XBLOCK=1024, num_warps=4, 
num_stages=1) del primals_7 return buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf5 class TinyConvNet3dNew(torch.nn.Module): def __init__(self, in_channels=1, out_channels=1): super().__init__() self.conv1 = torch.nn.Conv3d(in_channels, 16, 1) self.nlin1 = torch.nn.ReLU() self.conv2 = torch.nn.Conv3d(16, 64, 1) self.nlin2 = torch.nn.ReLU() self.conv3 = torch.nn.Conv3d(64, out_channels, 1) self.nlin3 = torch.nn.Sigmoid() def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
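All three convolutions here use kernel size 1, so each is a per-voxel channel mix; that is why Inductor can hand the convolutions to extern_kernels and only fuse the bias add with the activation. A minimal sketch showing that a 1x1x1 Conv3d matches a Linear over the channel axis (CPU only; tolerances loosened since the two paths may accumulate in different orders):

import torch

conv = torch.nn.Conv3d(16, 64, 1)
lin = torch.nn.Linear(16, 64)
with torch.no_grad():
    lin.weight.copy_(conv.weight.view(64, 16))  # (64, 16, 1, 1, 1) -> (64, 16)
    lin.bias.copy_(conv.bias)
x = torch.rand(2, 16, 4, 4, 4)
a = conv(x)
b = lin(x.permute(0, 2, 3, 4, 1)).permute(0, 4, 1, 2, 3)  # channels-last matmul, then back
torch.testing.assert_close(a, b, rtol=1e-4, atol=1e-5)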
Tomaz-Vieira/tiktorch
TinyConvNet3d
false
18,019
[ "MIT" ]
8
2d6803c4ba5e26e4b27bf8af6638040fa4fc5628
https://github.com/Tomaz-Vieira/tiktorch/tree/2d6803c4ba5e26e4b27bf8af6638040fa4fc5628
Dummy
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/d2/cd2iunkfm2o6kbt7ostah2yeqaomqayv7ggv5hr4yavd2ylubob4.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 
4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class Dummy(nn.Module):

    def forward(self, input):
        x = input
        return x + 1


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class DummyNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
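Even a single elementwise add gets the full wrapper treatment: an output buffer allocation, one fused kernel launch, and the guarded size/stride contract on the input. A minimal check, assuming a CUDA device and that the triton_code listing above has been imported so DummyNew is in scope:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')  # must match the guarded (4, 4, 4, 4) contiguous layout
torch.testing.assert_close(DummyNew()(x), x + 1)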
Tomaz-Vieira/tiktorch
Dummy
false
18,020
[ "MIT" ]
8
2d6803c4ba5e26e4b27bf8af6638040fa4fc5628
https://github.com/Tomaz-Vieira/tiktorch/tree/2d6803c4ba5e26e4b27bf8af6638040fa4fc5628
AttPool
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/53/c53ntsmhklpsjsuq6datj42qy4yr2zstzbkypwwjej2xbrcade6n.py # Topologically Sorted Source Nodes: [scores, mul, x], Original ATen: [aten._softmax, aten.mul, aten.sum] # Source node to ATen node mapping: # mul => mul # scores => amax, div, exp, sub, sum_1 # x => sum_2 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [4], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [4], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_3), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [4], True), kwargs = {}) triton_poi_fused__softmax_mul_sum_0 = async_compile.triton('triton_poi_fused__softmax_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': 
False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp4 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = tmp0 - tmp0 tmp2 = tl_math.exp(tmp1) tmp3 = tmp2 / tmp2 tmp5 = tmp3 * tmp4 tmp7 = tmp3 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp3 * tmp9 tmp11 = tmp8 + tmp10 tmp13 = tmp3 * tmp12 tmp14 = tmp11 + tmp13 tl.store(out_ptr0 + (x0), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((256, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (256, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_1 del primals_2 buf2 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [scores, mul, x], Original ATen: [aten._softmax, aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused__softmax_mul_sum_0.run(buf1, primals_3, buf2, 256, grid=grid(256), stream=stream0) return (buf2, primals_3, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn from torch.nn import functional as F class AttPool(nn.Module): """ Pool representations along a dimension with learned softmax scores. Args: input_size (int): Input size. dim (int): Dimension on which to apply the attention pooling. """ def __init__(self, input_size, dim): super(AttPool, self).__init__() self.lin = nn.Linear(input_size, 1) self.dim = dim def forward(self, x): scores = F.softmax(self.lin(x), dim=self.dim) x = (scores * x).sum(dim=self.dim, keepdim=True) return x def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'dim': 4}]
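A minimal usage sketch (hypothetical, not part of the record above) for the shapes returned by get_inputs()/get_init_inputs(). One detail worth noting: nn.Linear(input_size, 1) leaves a singleton last dimension, and dim=4 is that dimension, so the softmax runs over a single element and evaluates to 1 everywhere; the fused kernel in this record encodes exactly that simplification (tmp1 = tmp0 - tmp0, tmp3 = tmp2 / tmp2).

import torch

# Hypothetical sanity check: with dim=4 pointing at the singleton score
# dimension, the attention weights are identically 1 and the pool reduces
# to a plain sum over that dimension.
pool = AttPool(input_size=4, dim=4)
x = torch.rand(4, 4, 4, 4, 4)
out = pool(x)
assert out.shape == (4, 4, 4, 4, 1)
assert torch.allclose(out, x.sum(dim=4, keepdim=True))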
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp4 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = tmp0 - tmp0 tmp2 = tl_math.exp(tmp1) tmp3 = tmp2 / tmp2 tmp5 = tmp3 * tmp4 tmp7 = tmp3 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp3 * tmp9 tmp11 = tmp8 + tmp10 tmp13 = tmp3 * tmp12 tmp14 = tmp11 + tmp13 tl.store(out_ptr0 + x0, tmp14, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((256, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (256, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_1 del primals_2 buf2 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch .float32) get_raw_stream(0) triton_poi_fused__softmax_mul_sum_0[grid(256)](buf1, primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf2, primals_3, buf1 class AttPoolNew(nn.Module): """ Pool representations along a dimension with learned softmax scores. Args: input_size (int): Input size. dim (int): Dimension on which to apply the attention pooling. """ def __init__(self, input_size, dim): super(AttPoolNew, self).__init__() self.lin = nn.Linear(input_size, 1) self.dim = dim def forward(self, input_0): primals_1 = self.lin.weight primals_2 = self.lin.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
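A hedged equivalence check between the eager and Inductor-compiled modules. Assumptions: a CUDA device is available, and the input matches the traced shape, since call() is specialized to a contiguous float32 [4, 4, 4, 4, 4] tensor via its assert_size_stride guards.

import torch

if torch.cuda.is_available():
    eager = AttPool(input_size=4, dim=4).cuda()
    compiled = AttPoolNew(input_size=4, dim=4).cuda()
    compiled.load_state_dict(eager.state_dict())  # share the linear weights
    x = torch.rand(4, 4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(eager(x), compiled(x))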
TorchSpatiotemporal/tsl
AttPool
false
18,021
[ "MIT" ]
4
da13493b0cf83826bf41fe78a67e8d4ce1d7a8a0
https://github.com/TorchSpatiotemporal/tsl/tree/da13493b0cf83826bf41fe78a67e8d4ce1d7a8a0
Sobel
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/xl/cxl73cnmpzg5xekqai6derupmxh4i4egjbc56sdk3h4s3tkzytbv.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%arg1_1, %arg0_1, None, [1, 1], [1, 1], [1, 1], False, [0, 0], 3), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 12 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/hq/chqxgfgu4qfj67oz7pboh3vsakbpkviwbnpj2xkp2utrtofpfjlu.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%arg1_1, %arg0_1, None, [1, 1], [1, 1], [1, 1], False, [0, 0], 3), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 24 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y0 = yindex % 6 y1 = (yindex // 6) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (6*x2) + (24576*y1)), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4096*y3)), tmp0, ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (6, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(arg1_1, (4, 3, 64, 64), (12288, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(arg1_1, buf0, 12, 4096, grid=grid(12, 4096), stream=stream0) del arg1_1 # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf1 = 
extern_kernels.convolution(buf0, arg0_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf1, (4, 6, 64, 64), (24576, 1, 384, 6)) del arg0_1 del buf0 buf2 = empty_strided_cuda((4, 6, 64, 64), (24576, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf1, buf2, 24, 4096, grid=grid(24, 4096), stream=stream0) del buf1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((6, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.init import torch.nn.functional as F class Sobel(nn.Module): def __init__(self): super(Sobel, self).__init__() kernel = [[[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], [[-1, -2, -1], [0, 0, 0], [1, 2, 1]]] kernel = torch.Tensor(kernel).unsqueeze(1).repeat([3, 1, 1, 1]) self.kernel = nn.Parameter(data=kernel, requires_grad=False) def forward(self, x): """ Arguments: x: a float tensor with shape [b, 3, h, w]. It represents a RGB image with pixel values in [0, 1] range. Returns: a float tensor with shape [b, 3*2, h, w]. """ x = F.conv2d(x, self.kernel, padding=1, groups=3) return x def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {}]
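With groups=3 and the [gx, gy] filter pair tiled three times, the output channels interleave as [R_gx, R_gy, G_gx, G_gy, B_gx, B_gy]. A minimal post-processing sketch (hypothetical, not in the original module) that turns the 6-channel output into a per-channel gradient magnitude:

import torch

sobel = Sobel()
img = torch.rand(4, 3, 64, 64)
grads = sobel(img)                       # [4, 6, 64, 64]
gx, gy = grads[:, 0::2], grads[:, 1::2]  # horizontal / vertical gradients
magnitude = torch.sqrt(gx ** 2 + gy ** 2 + 1e-12)  # per-RGB-channel edges
assert magnitude.shape == img.shape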
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 24 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y0 = yindex % 6 y1 = yindex // 6 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 6 * x2 + 24576 * y1), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4096 * y3), tmp0, ymask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (6, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(arg1_1, (4, 3, 64, 64), (12288, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(12, 4096)](arg1_1, buf0, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del arg1_1 buf1 = extern_kernels.convolution(buf0, arg0_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) assert_size_stride(buf1, (4, 6, 64, 64), (24576, 1, 384, 6)) del arg0_1 del buf0 buf2 = empty_strided_cuda((4, 6, 64, 64), (24576, 4096, 64, 1), torch.float32) triton_poi_fused_convolution_1[grid(24, 4096)](buf1, buf2, 24, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf1 return buf2, class SobelNew(nn.Module): def __init__(self): super(SobelNew, self).__init__() kernel = [[[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], [[-1, -2, -1], [0, 0, 0], [1, 2, 1]]] kernel = torch.Tensor(kernel).unsqueeze(1).repeat([3, 1, 1, 1]) self.kernel = nn.Parameter(data=kernel, requires_grad=False) def forward(self, input_0): arg0_1 = self.kernel arg1_1 = input_0 output = call([arg0_1, arg1_1]) return output[0]
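The two Triton kernels here are pure layout shuffles around the extern convolution: the first copies the NCHW input into a channels-last buffer (strides (12288, 1, 192, 3)), and the second copies the NHWC result back to contiguous NCHW. A sketch of the same trick written by hand in eager PyTorch (an illustration, not the generated code's API; the values are identical either way):

import torch
import torch.nn.functional as F

sobel = Sobel()
x = torch.rand(4, 3, 64, 64)
x_cl = x.to(memory_format=torch.channels_last)         # NHWC, like buf0
y = F.conv2d(x_cl, sobel.kernel, padding=1, groups=3)  # extern conv step
y = y.contiguous()                                     # NCHW again, like buf2
assert torch.allclose(y, sobel(x))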
TropComplique/WESPE
Sobel
false
18,023
[ "MIT" ]
5
84738f1ed802a3f6a4a0549677d8137997fac617
https://github.com/TropComplique/WESPE/tree/84738f1ed802a3f6a4a0549677d8137997fac617
MaxPool3x3
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/r6/cr63646lmkteelgl5qsjhd5ljcimmvo4mupanu2rzy42gq5f6vzq.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x => getitem # Graph fragment: # %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 4 x0 = xindex % 4 x4 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, 
tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = (-1) + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + ((-5) + x4), tmp10 & xmask, other=float("-inf")) tmp12 = x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + ((-4) + x4), tmp16 & xmask, other=float("-inf")) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + ((-3) + x4), tmp23 & xmask, other=float("-inf")) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = x1 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + ((-1) + x4), tmp30 & xmask, other=float("-inf")) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + (x4), tmp33 & xmask, other=float("-inf")) tmp35 = triton_helpers.maximum(tmp34, tmp32) tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (1 + x4), tmp36 & xmask, other=float("-inf")) tmp38 = triton_helpers.maximum(tmp37, tmp35) tmp39 = 1 + x1 tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 & tmp9 tmp44 = tl.load(in_ptr0 + (3 + x4), tmp43 & xmask, other=float("-inf")) tmp45 = triton_helpers.maximum(tmp44, tmp38) tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (4 + x4), tmp46 & xmask, other=float("-inf")) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (5 + x4), tmp49 & xmask, other=float("-inf")) tmp51 = triton_helpers.maximum(tmp50, tmp48) tl.store(out_ptr0 + (x4), tmp51, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices] stream0 = get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class MaxPool3x3(nn.Module): """3x3 max pool with no subsampling.""" def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1): super(MaxPool3x3, self).__init__() self.maxpool = nn.MaxPool2d(kernel_size, stride, padding) def forward(self, x): x = self.maxpool(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
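A quick shape check (hypothetical, mirroring get_inputs()): a 3x3 window with stride 1 and padding 1 preserves the spatial size, and the in_channels/out_channels constructor arguments exist only for interface parity with other ops; they are never used.

import torch

pool = MaxPool3x3(in_channels=4, out_channels=4)
x = torch.rand(4, 4, 4, 4)
assert pool(x).shape == x.shape  # 3x3 window, stride 1, padding 1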
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-5 + x4), tmp10 & xmask, other=float('-inf')) tmp12 = x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-4 + x4), tmp16 & xmask, other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + (-3 + x4), tmp23 & xmask, other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = x1 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + (-1 + x4), tmp30 & xmask, other=float('-inf')) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + x4, tmp33 & xmask, other=float('-inf')) tmp35 = triton_helpers.maximum(tmp34, tmp32) tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (1 + x4), tmp36 & xmask, other=float('-inf')) tmp38 = triton_helpers.maximum(tmp37, tmp35) tmp39 = 1 + x1 tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 & tmp9 tmp44 = tl.load(in_ptr0 + (3 + x4), tmp43 & xmask, other=float('-inf')) tmp45 = triton_helpers.maximum(tmp44, tmp38) tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (4 + x4), tmp46 & xmask, other=float('-inf')) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (5 + x4), tmp49 & xmask, other=float('-inf')) tmp51 = triton_helpers.maximum(tmp50, tmp48) tl.store(out_ptr0 + x4, tmp51, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class MaxPool3x3New(nn.Module): """3x3 max pool with no subsampling.""" def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1): super(MaxPool3x3New, self).__init__() self.maxpool = nn.MaxPool2d(kernel_size, stride, padding) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
VascoLopes/GEA
MaxPool3x3
false
18,024
[ "MIT" ]
4
ab80dbb9851dfc215102e5222e8d5f70e855dd15
https://github.com/VascoLopes/GEA/tree/ab80dbb9851dfc215102e5222e8d5f70e855dd15
RelativeMargin
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/xz/cxzklpvpy4y6vxrefow6z6cfbj7ymiihjl2cpxuzxrmr3mljuxo5.py # Topologically Sorted Source Nodes: [sub, abs_1, sub_1, mul, sub_2, clamp, loss], Original ATen: [aten.sub, aten.abs, aten.mul, aten.clamp, aten.mean] # Source node to ATen node mapping: # abs_1 => abs_1 # clamp => clamp_min # loss => mean # mul => mul # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg2_1, %arg3_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg4_1, %sub_1), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_1, %mul), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%clamp_min,), kwargs = {}) triton_per_fused_abs_clamp_mean_mul_sub_0 = async_compile.triton('triton_per_fused_abs_clamp_mean_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {6: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=(6,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_per_fused_abs_clamp_mean_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_clamp_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp4 = tl.load(in_ptr2 + (r0), None) tmp5 = tl.load(in_ptr3 + (r0), None) tmp6 = tl.load(in_ptr4 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp7 = tmp5 - tmp6 tmp8 = tmp4 * tmp7 tmp9 = tmp3 - tmp8 tmp10 = 0.0 tmp11 = triton_helpers.maximum(tmp9, tmp10) tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 256.0 tmp16 = tmp14 / tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp16, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub, abs_1, sub_1, mul, sub_2, clamp, loss], Original ATen: [aten.sub, aten.abs, aten.mul, aten.clamp, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_abs_clamp_mean_mul_sub_0.run(buf1, arg0_1, arg1_1, arg4_1, arg2_1, arg3_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del arg2_1 del arg3_1 del arg4_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg4_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class RelativeMargin(nn.Module):
    """Relative margin ranking loss.

    Applies an element-wise hinge to the gap between the absolute target
    difference ``|y1 - y2|`` and the scaled score margin ``t * (x1 - x2)``;
    ``reduce=True`` averages the clamped values, ``reduce=False`` sums them
    (both return a scalar).
    """

    def __init__(self):
        super(RelativeMargin, self).__init__()

    def forward(self, x1, x2, y1, y2, t, reduce=True):
        if reduce:
            loss = torch.mean(torch.clamp(torch.abs(y1 - y2) - t * (x1 -
                x2), 0.0))
        else:
            loss = torch.sum(torch.clamp(torch.abs(y1 - y2) - t * (x1 -
                x2), 0.0))
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
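A worked single-element example (values chosen for illustration) of the element-wise hinge max(|y1 - y2| - t * (x1 - x2), 0):

import torch

x1, x2 = torch.tensor([1.0]), torch.tensor([0.0])
y1, y2 = torch.tensor([2.0]), torch.tensor([0.5])
t = torch.tensor([1.0])
# |2.0 - 0.5| - 1.0 * (1.0 - 0.0) = 0.5; clamped at 0, then averaged -> 0.5
loss = RelativeMargin()(x1, x2, y1, y2, t)
assert torch.isclose(loss, torch.tensor(0.5))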
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_clamp_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp4 = tl.load(in_ptr2 + r0, None) tmp5 = tl.load(in_ptr3 + r0, None) tmp6 = tl.load(in_ptr4 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp7 = tmp5 - tmp6 tmp8 = tmp4 * tmp7 tmp9 = tmp3 - tmp8 tmp10 = 0.0 tmp11 = triton_helpers.maximum(tmp9, tmp10) tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 256.0 tmp16 = tmp14 / tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_clamp_mean_mul_sub_0[grid(1)](buf1, arg0_1, arg1_1, arg4_1, arg2_1, arg3_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 del arg4_1 return buf1, class RelativeMarginNew(nn.Module): def __init__(self): super(RelativeMarginNew, self).__init__() def forward(self, input_0, input_1, input_2, input_3, input_4): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 arg4_1 = input_4 output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1]) return output[0]
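Two properties of the compiled wrapper can be read off the code above: the kernel divides its reduction by 256.0, so only the reduce=True (mean) branch survives, and the traced placeholder order, per the graph fragment in the original output (%sub = arg0 - arg1 is y1 - y2, %sub_1 = arg2 - arg3 is x1 - x2), is (y1, y2, x1, x2, t) rather than the eager signature order. A hedged CUDA check under those assumptions:

import torch

if torch.cuda.is_available():
    x1, x2, y1, y2, t = [torch.rand(4, 4, 4, 4, device='cuda')
                         for _ in range(5)]
    torch.testing.assert_close(
        RelativeMargin()(x1, x2, y1, y2, t),
        RelativeMarginNew()(y1, y2, x1, x2, t))  # note the argument order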
UKPLab/ijcai2019-relis
RelativeMargin
false
18,025
[ "MIT" ]
5
8a40762dcfa90c075a4f6591cbdceb468026ef17
https://github.com/UKPLab/ijcai2019-relis/tree/8a40762dcfa90c075a4f6591cbdceb468026ef17
TVLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/sc/cscbfuqxin7zvzm4pxdb3ugrt53xae5u7ddhweok7twmbtdh6qtf.py # Topologically Sorted Source Nodes: [sub, dyh, sub_1, dyhath, sub_4, norm, truediv, sub_2, dyw, sub_3, dyhatw, sub_5, norm_1, truediv_1, error], Original ATen: [aten.sub, aten.abs, aten.linalg_vector_norm, aten.div, aten.add] # Source node to ATen node mapping: # dyh => abs_1 # dyhath => abs_2 # dyhatw => abs_4 # dyw => abs_3 # error => add # norm => abs_5, pow_2, sum_1 # norm_1 => abs_6, pow_4, sum_2 # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # sub_3 => sub_3 # sub_4 => sub_4 # sub_5 => sub_5 # truediv => div # truediv_1 => div_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_3, %slice_7), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_11, %slice_15), kwargs = {}) # %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_1,), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_1, %abs_2), kwargs = {}) # %abs_5 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_4,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_5, None), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 1.0), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_2, 4), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_20, %slice_24), kwargs = {}) # %abs_3 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_2,), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_28, %slice_32), kwargs = {}) # %abs_4 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_3,), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_3, %abs_4), kwargs = {}) # %abs_6 : [num_users=1] = 
call_function[target=torch.ops.aten.abs.default](args = (%sub_5,), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_6, None), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 1.0), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_4, 4), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %div_1), kwargs = {}) triton_per_fused_abs_add_div_linalg_vector_norm_sub_0 = async_compile.triton('triton_per_fused_abs_add_div_linalg_vector_norm_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_div_linalg_vector_norm_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_add_div_linalg_vector_norm_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 192 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = rindex < rnumel r0 = rindex % 12 r1 = (rindex // 12) r2 = rindex % 3 r3 = (rindex // 3) tmp0 = tl.load(in_ptr0 + (4 + r0 + (16*r1)), rmask, other=0.0) tmp1 = tl.load(in_ptr0 + (r0 + (16*r1)), rmask, other=0.0) tmp4 = tl.load(in_ptr1 + (4 + r0 + (16*r1)), rmask, other=0.0) tmp5 = tl.load(in_ptr1 + (r0 + (16*r1)), rmask, other=0.0) tmp14 = tl.load(in_ptr0 + (1 + r2 + (4*r3)), rmask, other=0.0) tmp15 = tl.load(in_ptr0 + (r2 + (4*r3)), rmask, other=0.0) tmp18 = tl.load(in_ptr1 + (1 + r2 + (4*r3)), rmask, other=0.0) tmp19 = tl.load(in_ptr1 + (r2 + (4*r3)), rmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp6 = tmp4 - tmp5 tmp7 = tl_math.abs(tmp6) tmp8 = tmp3 - tmp7 tmp9 = tl_math.abs(tmp8) tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.where(rmask, tmp10, 0) tmp13 = tl.sum(tmp12, 1)[:, None] tmp16 = tmp14 - tmp15 tmp17 = tl_math.abs(tmp16) tmp20 = tmp18 - tmp19 tmp21 = tl_math.abs(tmp20) tmp22 = tmp17 - tmp21 tmp23 = tl_math.abs(tmp22) tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) 
tmp26 = tl.where(rmask, tmp24, 0) tmp27 = tl.sum(tmp26, 1)[:, None] tmp28 = 0.25 tmp29 = tmp13 * tmp28 tmp30 = tmp27 * tmp28 tmp31 = tmp29 + tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp31, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub, dyh, sub_1, dyhath, sub_4, norm, truediv, sub_2, dyw, sub_3, dyhatw, sub_5, norm_1, truediv_1, error], Original ATen: [aten.sub, aten.abs, aten.linalg_vector_norm, aten.div, aten.add] stream0 = get_raw_stream(0) triton_per_fused_abs_add_div_linalg_vector_norm_sub_0.run(buf2, arg0_1, arg1_1, 1, 192, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class TVLoss(nn.Module): """ Total variation loss. """ def __init__(self): super(TVLoss, self).__init__() def forward(self, yhat, y): _bsize, _chan, height, width = y.size() dyh = torch.abs(y[:, :, 1:, :] - y[:, :, :-1, :]) dyhath = torch.abs(yhat[:, :, 1:, :] - yhat[:, :, :-1, :]) dyw = torch.abs(y[:, :, :, 1:] - y[:, :, :, :-1]) dyhatw = torch.abs(yhat[:, :, :, 1:] - yhat[:, :, :, :-1]) error = torch.norm(dyh - dyhath, 1) / height + torch.norm(dyw - dyhatw, 1) / width return error def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
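Since torch.norm(., 1) here is an entry-wise L1 norm over the whole tensor, the loss is the summed absolute difference between the spatial gradients of y and yhat, scaled by 1/height and 1/width; for the 4x4 inputs both factors are 0.25, the constant folded into the fused kernel above. A small hand-check sketch:

import torch

yhat, y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
dyh = (y[:, :, 1:, :] - y[:, :, :-1, :]).abs()
dyhath = (yhat[:, :, 1:, :] - yhat[:, :, :-1, :]).abs()
dyw = (y[:, :, :, 1:] - y[:, :, :, :-1]).abs()
dyhatw = (yhat[:, :, :, 1:] - yhat[:, :, :, :-1]).abs()
manual = (dyh - dyhath).abs().sum() / 4 + (dyw - dyhatw).abs().sum() / 4
torch.testing.assert_close(TVLoss()(yhat, y), manual)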
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_div_linalg_vector_norm_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): rnumel = 192 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r0 = rindex % 12 r1 = rindex // 12 r2 = rindex % 3 r3 = rindex // 3 tmp0 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0) tmp1 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0) tmp4 = tl.load(in_ptr1 + (4 + r0 + 16 * r1), rmask, other=0.0) tmp5 = tl.load(in_ptr1 + (r0 + 16 * r1), rmask, other=0.0) tmp14 = tl.load(in_ptr0 + (1 + r2 + 4 * r3), rmask, other=0.0) tmp15 = tl.load(in_ptr0 + (r2 + 4 * r3), rmask, other=0.0) tmp18 = tl.load(in_ptr1 + (1 + r2 + 4 * r3), rmask, other=0.0) tmp19 = tl.load(in_ptr1 + (r2 + 4 * r3), rmask, other=0.0) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp6 = tmp4 - tmp5 tmp7 = tl_math.abs(tmp6) tmp8 = tmp3 - tmp7 tmp9 = tl_math.abs(tmp8) tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.where(rmask, tmp10, 0) tmp13 = tl.sum(tmp12, 1)[:, None] tmp16 = tmp14 - tmp15 tmp17 = tl_math.abs(tmp16) tmp20 = tmp18 - tmp19 tmp21 = tl_math.abs(tmp20) tmp22 = tmp17 - tmp21 tmp23 = tl_math.abs(tmp22) tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.where(rmask, tmp24, 0) tmp27 = tl.sum(tmp26, 1)[:, None] tmp28 = 0.25 tmp29 = tmp13 * tmp28 tmp30 = tmp27 * tmp28 tmp31 = tmp29 + tmp30 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp31, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_add_div_linalg_vector_norm_sub_0[grid(1)](buf2, arg0_1, arg1_1, 1, 192, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, class TVLossNew(nn.Module): """ Total variation loss. """ def __init__(self): super(TVLossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
TiagoCortinhal/SR_GAN
TVLoss
false
18,026
[ "MIT" ]
4
9ccceaa25e87e404d20825dbb552fa6a2ef3af47
https://github.com/TiagoCortinhal/SR_GAN/tree/9ccceaa25e87e404d20825dbb552fa6a2ef3af47
SoftDetectionModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/y5/cy5ew5cec2fvefladh4ieh7p66fnqja2hfmujjkcyy3k3637uoop.py # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max] # Source node to ATen node mapping: # max_1 => max_1 # Graph fragment: # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view, 1), kwargs = {}) triton_per_fused_max_0 = async_compile.triton('triton_per_fused_max_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_max_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) 
tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, float("-inf")) tmp6 = triton_helpers.max2(tmp5, 1)[:, None] tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/na/cnafbglpaavplegdnm7eonqtmt7xqqdnt5cpuhks7muibqm2xc4p.py # Topologically Sorted Source Nodes: [batch, truediv, exp, pad], Original ATen: [aten.relu, aten.div, aten.exp, aten.constant_pad_nd] # Source node to ATen node mapping: # batch => relu # exp => exp # pad => constant_pad_nd # truediv => div # Graph fragment: # %relu : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu, %view_1), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div,), kwargs = {}) # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%exp, [1, 1, 1, 1], 1.0), kwargs = {}) triton_poi_fused_constant_pad_nd_div_exp_relu_1 = async_compile.triton('triton_poi_fused_constant_pad_nd_div_exp_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_div_exp_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_div_exp_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 6) % 6 x0 = xindex % 6 x4 = (xindex // 36) x3 = (xindex // 144) x6 = xindex tmp0 = (-1) + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = (-1) + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x4)), tmp10 & xmask, other=0.0) tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.load(in_ptr1 + (x3), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tmp13 / tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tl.full(tmp16.shape, 1.0, 
tmp16.dtype) tmp18 = tl.where(tmp10, tmp16, tmp17) tl.store(out_ptr0 + (x6), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/m7/cm7kircrrpucwwjo3gk6fvkx5q7mfdmxfdx5i46yrkgxuhbrhdji.py # Topologically Sorted Source Nodes: [batch, truediv, exp, pad, avg_pool2d], Original ATen: [aten.relu, aten.div, aten.exp, aten.constant_pad_nd, aten.avg_pool2d] # Source node to ATen node mapping: # avg_pool2d => avg_pool2d # batch => relu # exp => exp # pad => constant_pad_nd # truediv => div # Graph fragment: # %relu : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu, %view_1), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div,), kwargs = {}) # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%exp, [1, 1, 1, 1], 1.0), kwargs = {}) # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%constant_pad_nd, [3, 3], [1, 1]), kwargs = {}) triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2 = async_compile.triton('triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (6*x1) + (36*x2)), xmask) tmp1 = tl.load(in_ptr0 + (1 + x0 + (6*x1) + (36*x2)), xmask) tmp3 = tl.load(in_ptr0 + (2 + x0 + (6*x1) + (36*x2)), xmask) tmp5 = tl.load(in_ptr0 + (6 + x0 + (6*x1) + (36*x2)), xmask) tmp7 = tl.load(in_ptr0 + (7 + x0 + (6*x1) + (36*x2)), xmask) tmp9 = tl.load(in_ptr0 + (8 + x0 + (6*x1) + (36*x2)), xmask) tmp11 = tl.load(in_ptr0 + (12 + x0 + (6*x1) + (36*x2)), xmask) tmp13 = tl.load(in_ptr0 + (13 + x0 + (6*x1) + (36*x2)), xmask) tmp15 = tl.load(in_ptr0 + (14 + x0 + (6*x1) + 
(36*x2)), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp17 = 0.1111111111111111 tmp18 = tmp16 * tmp17 tl.store(out_ptr0 + (x3), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/vo/cvogfq5jjzsnnhmefpwoun4de2pfzdijmj2aqtble7qxzjhmkybt.py # Topologically Sorted Source Nodes: [batch, truediv, exp, sum_exp, local_max_score, depth_wise_max_score, all_scores, max_3, sum_1, add, score_1], Original ATen: [aten.relu, aten.div, aten.exp, aten.mul, aten.max, aten.sum, aten.add] # Source node to ATen node mapping: # add => add # all_scores => mul_1 # batch => relu # depth_wise_max_score => div_2 # exp => exp # local_max_score => div_1 # max_3 => max_3 # score_1 => div_3 # sum_1 => sum_1 # sum_exp => mul # truediv => div # Graph fragment: # %relu : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu, %view_1), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%avg_pool2d, 9), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %mul), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu, %unsqueeze), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %div_2), kwargs = {}) # %max_3 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%mul_1, 1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_2, [1]), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, 1e-05), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%getitem_4, %add), kwargs = {}) triton_per_fused_add_div_exp_max_mul_relu_sum_3 = async_compile.triton('triton_per_fused_add_div_exp_max_mul_relu_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_exp_max_mul_relu_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_exp_max_mul_relu_sum_3(in_ptr0, in_ptr1, in_ptr2, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (r1 + (64*x0)), xmask, other=0.0) tmp10 = tl.load(in_ptr0 + (16 + r1 + (64*x0)), xmask, other=0.0) tmp13 = tl.load(in_ptr0 + (32 + r1 + (64*x0)), xmask, other=0.0) tmp16 = tl.load(in_ptr0 + (48 + r1 + (64*x0)), xmask, other=0.0) tmp23 = tl.load(in_ptr2 + (16 + r1 + (64*x0)), xmask, other=0.0) tmp31 = tl.load(in_ptr2 + (32 + r1 + (64*x0)), xmask, other=0.0) tmp39 = tl.load(in_ptr2 + (48 + r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 / tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = 9.0 tmp8 = tmp6 * tmp7 tmp9 = tmp5 / tmp8 tmp11 = triton_helpers.maximum(tmp1, tmp10) tmp12 = triton_helpers.maximum(tmp2, tmp11) tmp14 = triton_helpers.maximum(tmp1, tmp13) tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = triton_helpers.maximum(tmp1, tmp16) tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = tmp2 / tmp18 tmp20 = tmp9 * tmp19 tmp21 = tmp11 / tmp3 tmp22 = tl_math.exp(tmp21) tmp24 = tmp23 * tmp7 tmp25 = tmp22 / tmp24 tmp26 = tmp11 / tmp18 tmp27 = tmp25 * tmp26 tmp28 = triton_helpers.maximum(tmp20, tmp27) tmp29 = tmp14 / tmp3 tmp30 = tl_math.exp(tmp29) tmp32 = tmp31 * tmp7 tmp33 = tmp30 / tmp32 tmp34 = tmp14 / tmp18 tmp35 = tmp33 * tmp34 tmp36 = triton_helpers.maximum(tmp28, tmp35) tmp37 = tmp17 / tmp3 tmp38 = tl_math.exp(tmp37) tmp40 = tmp39 * tmp7 tmp41 = tmp38 / tmp40 tmp42 = tmp17 / tmp18 tmp43 = tmp41 * tmp42 tmp44 = triton_helpers.maximum(tmp36, tmp43) tmp45 = tl.broadcast_to(tmp44, [XBLOCK, RBLOCK]) tmp47 = tl.where(xmask, tmp45, 0) tmp48 = tl.sum(tmp47, 1)[:, None] tmp49 = 1e-05 tmp50 = tmp48 + tmp49 tmp51 = tmp44 / tmp50 tl.store(out_ptr2 + (r1 + (16*x0)), tmp51, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max] stream0 = get_raw_stream(0) triton_per_fused_max_0.run(arg0_1, buf0, 4, 64, grid=grid(4), stream=stream0) buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) # Topologically Sorted Source Nodes: [batch, truediv, exp, pad], Original ATen: [aten.relu, aten.div, aten.exp, aten.constant_pad_nd] triton_poi_fused_constant_pad_nd_div_exp_relu_1.run(arg0_1, buf0, buf2, 576, grid=grid(576), stream=stream0) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [batch, truediv, exp, pad, avg_pool2d], Original ATen: [aten.relu, aten.div, aten.exp, aten.constant_pad_nd, aten.avg_pool2d] triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2.run(buf2, buf3, 256, grid=grid(256), stream=stream0) del buf2 buf6 = 
empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [batch, truediv, exp, sum_exp, local_max_score, depth_wise_max_score, all_scores, max_3, sum_1, add, score_1], Original ATen: [aten.relu, aten.div, aten.exp, aten.mul, aten.max, aten.sum, aten.add] triton_per_fused_add_div_exp_max_mul_relu_sum_3.run(arg0_1, buf0, buf3, buf6, 4, 16, grid=grid(4), stream=stream0) del arg0_1 del buf0 del buf3 return (buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn class SoftDetectionModule(nn.Module): def __init__(self, soft_local_max_size=3): super(SoftDetectionModule, self).__init__() self.soft_local_max_size = soft_local_max_size self.pad = self.soft_local_max_size // 2 def forward(self, batch): b = batch.size(0) batch = F.relu(batch) max_per_sample = torch.max(batch.view(b, -1), dim=1)[0] exp = torch.exp(batch / max_per_sample.view(b, 1, 1, 1)) sum_exp = self.soft_local_max_size ** 2 * F.avg_pool2d(F.pad(exp, [self.pad] * 4, mode='constant', value=1.0), self.soft_local_max_size, stride=1) local_max_score = exp / sum_exp depth_wise_max = torch.max(batch, dim=1)[0] depth_wise_max_score = batch / depth_wise_max.unsqueeze(1) all_scores = local_max_score * depth_wise_max_score score = torch.max(all_scores, dim=1)[0] score = score / (torch.sum(score.view(b, -1), dim=1).view(b, 1, 1) + 1e-05) return score def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
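A minimal usage sketch for the eager module above, assuming the SoftDetectionModule class definition is in scope; the names `detector`, `x`, and `scores` are illustrative, not from the source. The input shape follows get_inputs().

import torch

# Instantiate the soft detector and score a random 4-channel feature map.
detector = SoftDetectionModule(soft_local_max_size=3)
x = torch.rand(4, 4, 4, 4)  # (batch, channels, height, width)
scores = detector(x)
print(scores.shape)  # torch.Size([4, 4, 4]): one spatial score map per sample
# Each per-sample map is normalized, so it sums to ~1
# (the 1e-05 term in the denominator keeps it just below 1).
print(scores.view(4, -1).sum(dim=1))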
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_max_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, float('-inf')) tmp6 = triton_helpers.max2(tmp5, 1)[:, None] tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_constant_pad_nd_div_exp_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 6 % 6 x0 = xindex % 6 x4 = xindex // 36 x3 = xindex // 144 x6 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = -1 + x0 tmp6 = tmp5 >= tmp1 tmp7 = tmp5 < tmp3 tmp8 = tmp2 & tmp4 tmp9 = tmp8 & tmp6 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x4), tmp10 & xmask, other=0.0) tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.load(in_ptr1 + x3, tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tmp13 / tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tl.full(tmp16.shape, 1.0, tmp16.dtype) tmp18 = tl.where(tmp10, tmp16, tmp17) tl.store(out_ptr0 + x6, tmp18, xmask) @triton.jit def triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 6 * x1 + 36 * x2), xmask) tmp1 = tl.load(in_ptr0 + (1 + x0 + 6 * x1 + 36 * x2), xmask) tmp3 = tl.load(in_ptr0 + (2 + x0 + 6 * x1 + 36 * x2), xmask) tmp5 = tl.load(in_ptr0 + (6 + x0 + 6 * x1 + 36 * x2), xmask) tmp7 = tl.load(in_ptr0 + (7 + x0 + 6 * x1 + 36 * x2), xmask) tmp9 = tl.load(in_ptr0 + (8 + x0 + 6 * x1 + 36 * x2), xmask) tmp11 = tl.load(in_ptr0 + (12 + x0 + 6 * x1 + 36 * x2), xmask) tmp13 = tl.load(in_ptr0 + (13 + x0 + 6 * x1 + 36 * x2), xmask) tmp15 = tl.load(in_ptr0 + (14 + x0 + 6 * x1 + 36 * x2), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp17 = 0.1111111111111111 tmp18 = tmp16 * tmp17 tl.store(out_ptr0 + x3, tmp18, xmask) @triton.jit def triton_per_fused_add_div_exp_max_mul_relu_sum_3(in_ptr0, in_ptr1, in_ptr2, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, 
tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (r1 + 64 * x0), xmask, other=0.0) tmp10 = tl.load(in_ptr0 + (16 + r1 + 64 * x0), xmask, other=0.0) tmp13 = tl.load(in_ptr0 + (32 + r1 + 64 * x0), xmask, other=0.0) tmp16 = tl.load(in_ptr0 + (48 + r1 + 64 * x0), xmask, other=0.0) tmp23 = tl.load(in_ptr2 + (16 + r1 + 64 * x0), xmask, other=0.0) tmp31 = tl.load(in_ptr2 + (32 + r1 + 64 * x0), xmask, other=0.0) tmp39 = tl.load(in_ptr2 + (48 + r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 / tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = 9.0 tmp8 = tmp6 * tmp7 tmp9 = tmp5 / tmp8 tmp11 = triton_helpers.maximum(tmp1, tmp10) tmp12 = triton_helpers.maximum(tmp2, tmp11) tmp14 = triton_helpers.maximum(tmp1, tmp13) tmp15 = triton_helpers.maximum(tmp12, tmp14) tmp17 = triton_helpers.maximum(tmp1, tmp16) tmp18 = triton_helpers.maximum(tmp15, tmp17) tmp19 = tmp2 / tmp18 tmp20 = tmp9 * tmp19 tmp21 = tmp11 / tmp3 tmp22 = tl_math.exp(tmp21) tmp24 = tmp23 * tmp7 tmp25 = tmp22 / tmp24 tmp26 = tmp11 / tmp18 tmp27 = tmp25 * tmp26 tmp28 = triton_helpers.maximum(tmp20, tmp27) tmp29 = tmp14 / tmp3 tmp30 = tl_math.exp(tmp29) tmp32 = tmp31 * tmp7 tmp33 = tmp30 / tmp32 tmp34 = tmp14 / tmp18 tmp35 = tmp33 * tmp34 tmp36 = triton_helpers.maximum(tmp28, tmp35) tmp37 = tmp17 / tmp3 tmp38 = tl_math.exp(tmp37) tmp40 = tmp39 * tmp7 tmp41 = tmp38 / tmp40 tmp42 = tmp17 / tmp18 tmp43 = tmp41 * tmp42 tmp44 = triton_helpers.maximum(tmp36, tmp43) tmp45 = tl.broadcast_to(tmp44, [XBLOCK, RBLOCK]) tmp47 = tl.where(xmask, tmp45, 0) tmp48 = tl.sum(tmp47, 1)[:, None] tmp49 = 1e-05 tmp50 = tmp48 + tmp49 tmp51 = tmp44 / tmp50 tl.store(out_ptr2 + (r1 + 16 * x0), tmp51, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_max_0[grid(4)](arg0_1, buf0, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) triton_poi_fused_constant_pad_nd_div_exp_relu_1[grid(576)](arg0_1, buf0, buf2, 576, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2[grid(256)]( buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_per_fused_add_div_exp_max_mul_relu_sum_3[grid(4)](arg0_1, buf0, buf3, buf6, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del buf0 del buf3 return buf6, class SoftDetectionModuleNew(nn.Module): def __init__(self, soft_local_max_size=3): super(SoftDetectionModuleNew, self).__init__() self.soft_local_max_size = soft_local_max_size self.pad = self.soft_local_max_size // 2 def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
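To check that the Inductor-compiled wrapper matches the eager module, one can compare outputs on a GPU; the compiled kernels are CUDA-only, so the sketch below (assuming both class definitions above are in scope) is guarded accordingly. The same pattern applies to the other eager/compiled module pairs in this dump.

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager = SoftDetectionModule()(x)
    compiled = SoftDetectionModuleNew()(x)
    # Fused-kernel arithmetic can reorder float ops, hence the tolerance.
    print(torch.allclose(eager, compiled, atol=1e-5))  # expected: True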
UditSinghParihar/d2-net
SoftDetectionModule
false
18,027
[ "BSD-3-Clause-Clear" ]
6
b3592beebe6759cf4cc1acdfd23d603ef059ef30
https://github.com/UditSinghParihar/d2-net/tree/b3592beebe6759cf4cc1acdfd23d603ef059ef30
FRN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/cs/ccsc7yopxjonfnc354tpnjrivhnijup7yf4q33essmzkn5qvr4md.py # Topologically Sorted Source Nodes: [pow_1, mean, add, rsqrt, x, mul_1, add_1, max_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.rsqrt, aten.mul, aten.maximum] # Source node to ATen node mapping: # add => add # add_1 => add_1 # max_1 => maximum # mean => mean # mul_1 => mul_1 # pow_1 => pow_1 # rsqrt => rsqrt # x => mul # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [2, 3], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %mul), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {}) # %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%add_1, %primals_4), kwargs = {}) triton_per_fused_add_maximum_mean_mul_pow_rsqrt_0 = async_compile.triton('triton_per_fused_add_maximum_mean_mul_pow_rsqrt_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 
'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_maximum_mean_mul_pow_rsqrt_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_maximum_mean_mul_pow_rsqrt_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp11 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = 16.0 tmp7 = tmp5 / tmp6 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp12 = tmp0 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tmp17 = triton_helpers.maximum(tmp15, tmp16) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp10, xmask) tl.store(out_ptr0 + (r1 + (16*x0)), tmp17, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, mean, add, rsqrt, x, mul_1, add_1, max_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.rsqrt, aten.mul, aten.maximum] stream0 = get_raw_stream(0) triton_per_fused_add_maximum_mean_mul_pow_rsqrt_0.run(buf1, primals_1, primals_2, primals_3, primals_4, buf2, 16, 16, grid=grid(16), stream=stream0) return (buf2, primals_1, primals_2, primals_3, primals_4, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, 
primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class FRN(nn.Module): def __init__(self, num_features, eps=1e-05): super(FRN, self).__init__() self.tau = nn.Parameter(torch.zeros(1, num_features, 1, 1)) self.gamma = nn.Parameter(torch.ones(1, num_features, 1, 1)) self.beta = nn.Parameter(torch.zeros(1, num_features, 1, 1)) self.eps = eps def forward(self, x): x = x * torch.rsqrt(torch.mean(x ** 2, dim=[2, 3], keepdim=True) + self.eps) return torch.max(self.gamma * x + self.beta, self.tau) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_features': 4}]
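A short sketch exercising the eager FRN module above (variable names are ours, assuming the class definition is in scope): it recomputes the defining identity max(gamma * x_hat + beta, tau), where x_hat = x * rsqrt(mean(x^2 over H, W) + eps), and checks it against the module output.

import torch

frn = FRN(num_features=4)
x = torch.rand(4, 4, 4, 4)
y = frn(x)
# Recompute the filter-response normalization by hand.
x_hat = x * torch.rsqrt(torch.mean(x ** 2, dim=[2, 3], keepdim=True) + frn.eps)
expected = torch.max(frn.gamma * x_hat + frn.beta, frn.tau)
print(torch.allclose(y, expected))  # True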
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_maximum_mean_mul_pow_rsqrt_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp11 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = 16.0 tmp7 = tmp5 / tmp6 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp12 = tmp0 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tmp17 = triton_helpers.maximum(tmp15, tmp16) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp10, xmask) tl.store(out_ptr0 + (r1 + 16 * x0), tmp17, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_maximum_mean_mul_pow_rsqrt_0[grid(16)](buf1, primals_1, primals_2, primals_3, primals_4, buf2, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) return buf2, primals_1, primals_2, primals_3, primals_4, buf1 class FRNNew(nn.Module): def __init__(self, num_features, eps=1e-05): super(FRNNew, self).__init__() self.tau = nn.Parameter(torch.zeros(1, num_features, 1, 1)) self.gamma = nn.Parameter(torch.ones(1, num_features, 1, 1)) self.beta = nn.Parameter(torch.zeros(1, num_features, 1, 1)) self.eps = eps def forward(self, input_0): primals_2 = self.tau primals_3 = self.gamma primals_4 = self.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
UdonDa/StarGAN-v2-pytorch-nonofficial
FRN
false
18,028
[ "MIT" ]
9
219df6b7fd4bd533686e2093ee914a337914ca9b
https://github.com/UdonDa/StarGAN-v2-pytorch-nonofficial/tree/219df6b7fd4bd533686e2093ee914a337914ca9b
Discriminator
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/cn/ccnvkf7kfnskbbfy2kwx55oghjftngamwdttghryrfs4g3fay72l.py # Topologically Sorted Source Nodes: [h1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # h1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 
= xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/7o/c7ofyv54vqgy4vn637mw3dfkal2vp3ujoh4qbckoc7ki247ygkdx.py # Topologically Sorted Source Nodes: [h2], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # h2 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/2j/c2jp642zvr4ghkjr4dtqfs6gtucat44jwfzo45rsswlx3dgp5sxw.py # Topologically Sorted Source Nodes: [score], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # score => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {}) triton_poi_fused_sigmoid_2 = async_compile.triton('triton_poi_fused_sigmoid_2', ''' import triton import triton.language as tl from triton.compiler.compiler import 
AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (256, 4), (4, 1)) assert_size_stride(primals_2, (256, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (512, 256), (256, 1)) assert_size_stride(primals_5, (512, ), (1, )) assert_size_stride(primals_6, (1, 512), (512, 1)) assert_size_stride(primals_7, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf0 # reuse buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) # Topologically Sorted Source Nodes: [h1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 16384, grid=grid(16384), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 512), (512, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 512), (1, 256), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 512), (8192, 2048, 512, 1), 0); del buf2 # reuse buf6 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.bool) # Topologically Sorted Source Nodes: [h2], Original ATen: [aten.relu, aten.threshold_backward] 
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf6, 32768, grid=grid(32768), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (64, 512), (512, 1), 0), reinterpret_tensor(primals_6, (512, 1), (1, 512), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [score], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_2.run(buf5, primals_7, 64, grid=grid(64), stream=stream0) del primals_7 return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(buf3, (64, 512), (512, 1), 0), buf5, primals_6, buf6, primals_4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((512, 256), (256, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 512), (512, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class Discriminator(nn.Module): def __init__(self, in_dim, hidden_dim=100): super(Discriminator, self).__init__() self.fc1 = nn.Linear(in_dim, 256) nn.init.xavier_normal_(self.fc1.weight) nn.init.constant_(self.fc1.bias, 0.0) self.fc2 = nn.Linear(256, 512) nn.init.xavier_normal_(self.fc2.weight) nn.init.constant_(self.fc2.bias, 0.0) self.fc3 = nn.Linear(512, 1) nn.init.xavier_normal_(self.fc3.weight) nn.init.constant_(self.fc3.bias, 0.0) def forward(self, x, TASK=2): h1 = F.relu(self.fc1(x)) h2 = F.relu(self.fc2(h1)) if TASK == 1 or TASK == 2: score = torch.sigmoid(self.fc3(h2)) else: score = self.fc3(h2) return score def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4}]
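Illustrative usage of the eager Discriminator above (names are ours, assuming the class definition is in scope): the default TASK=2 routes through the sigmoid head, while any other task id returns raw logits.

import torch

disc = Discriminator(in_dim=4)
x = torch.rand(4, 4, 4, 4)
probs = disc(x)           # TASK defaults to 2 -> sigmoid scores in (0, 1)
logits = disc(x, TASK=3)  # any other task id -> raw fc3 output
print(probs.shape, logits.shape)  # torch.Size([4, 4, 4, 1]) twice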
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (256, 4), (4, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (512, 256), (256, 1)) assert_size_stride(primals_5, (512,), (1,)) assert_size_stride(primals_6, (1, 512), (512, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0 ) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1, primals_2, buf7, 16384, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 512), (1, 256), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 512), (8192, 2048, 512, 1), 0 ) del buf2 buf6 = empty_strided_cuda((4, 4, 4, 512), (8192, 2048, 512, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(32768)](buf3, primals_5, buf6, 32768, XBLOCK=128, 
num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 512), (512, 1), 0), reinterpret_tensor(primals_6, (512, 1), (1, 512), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf4 triton_poi_fused_sigmoid_2[grid(64)](buf5, primals_7, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_7 return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(buf3, (64, 512), (512, 1), 0), buf5, primals_6, buf6, primals_4, buf7 class DiscriminatorNew(nn.Module): def __init__(self, in_dim, hidden_dim=100): super(DiscriminatorNew, self).__init__() self.fc1 = nn.Linear(in_dim, 256) nn.init.xavier_normal_(self.fc1.weight) nn.init.constant_(self.fc1.bias, 0.0) self.fc2 = nn.Linear(256, 512) nn.init.xavier_normal_(self.fc2.weight) nn.init.constant_(self.fc2.bias, 0.0) self.fc3 = nn.Linear(512, 1) nn.init.xavier_normal_(self.fc3.weight) nn.init.constant_(self.fc3.bias, 0.0) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
Vahe1994/ThreeDLAPGAN
Discriminator
false
18,029
[ "MIT" ]
6
7e8f20be9216bc741bbe22ed2a13c261f78db521
https://github.com/Vahe1994/ThreeDLAPGAN/tree/7e8f20be9216bc741bbe22ed2a13c261f78db521
FocalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/nn/cnnz6kyugk4ex7z2gna45k3yovarjt3y33yp6vb6ho7xpwkopvda.py # Topologically Sorted Source Nodes: [invprobs, neg_3, mul_1, sub_2, mul_2, mul_3, exp_2, mul, sub, neg, max_val, add, neg_1, exp, neg_2, sub_1, exp_1, add_1, log, loss, loss_1, mean], Original ATen: [aten.log_sigmoid_forward, aten.neg, aten.mul, aten.sub, aten.exp, aten.clamp, aten.add, aten.log, aten.mean] # Source node to ATen node mapping: # add => add # add_1 => add_1 # exp => exp # exp_1 => exp_1 # exp_2 => exp_3 # invprobs => abs_1, exp_2, full_default, log1p, minimum, neg_4, sub_3 # log => log # loss => add_2 # loss_1 => mul_4 # max_val => clamp_min # mean => mean # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # mul_3 => mul_3 # neg => neg # neg_1 => neg_1 # neg_2 => neg_2 # neg_3 => neg_3 # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %neg_3 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 2), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 1), kwargs = {}) # %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg_3, %sub_2), kwargs = {}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %mul_2), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%mul_2,), kwargs = {}) # %neg_4 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_4,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_2,), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, 2), 
kwargs = {}) # %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_3,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mul), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {}) # %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%neg, 0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %clamp_min), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%clamp_min,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_1,), kwargs = {}) # %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%neg_2, %clamp_min), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp, %exp_1), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %log), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_3, %add_2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_4,), kwargs = {}) triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0 = async_compile.triton('triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0(in_out_ptr0, 
in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp2 = tl.load(in_ptr1 + (r0), None) tmp1 = -tmp0 tmp3 = 2.0 tmp4 = tmp2 * tmp3 tmp5 = 1.0 tmp6 = tmp4 - tmp5 tmp7 = tmp1 * tmp6 tmp8 = 0.0 tmp9 = triton_helpers.minimum(tmp8, tmp7) tmp10 = tl_math.abs(tmp7) tmp11 = -tmp10 tmp12 = tl_math.exp(tmp11) tmp13 = libdevice.log1p(tmp12) tmp14 = tmp9 - tmp13 tmp15 = tmp14 * tmp3 tmp16 = tl_math.exp(tmp15) tmp17 = tmp0 * tmp2 tmp18 = tmp0 - tmp17 tmp19 = triton_helpers.maximum(tmp1, tmp8) tmp20 = tmp18 + tmp19 tmp21 = -tmp19 tmp22 = tl_math.exp(tmp21) tmp23 = tmp1 - tmp19 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tl_math.log(tmp25) tmp27 = tmp20 + tmp26 tmp28 = tmp16 * tmp27 tmp29 = tl.broadcast_to(tmp28, [RBLOCK]) tmp31 = triton_helpers.promote_to_tensor(tl.sum(tmp29, 0)) tmp32 = 256.0 tmp33 = tmp31 / tmp32 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp33, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [invprobs, neg_3, mul_1, sub_2, mul_2, mul_3, exp_2, mul, sub, neg, max_val, add, neg_1, exp, neg_2, sub_1, exp_1, add_1, log, loss, loss_1, mean], Original ATen: [aten.log_sigmoid_forward, aten.neg, aten.mul, aten.sub, aten.exp, aten.clamp, aten.add, aten.log, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.nn.functional as F class FocalLoss(nn.Module): def __init__(self, gamma: 'int'=2) -> None: super().__init__() self.gamma = gamma def forward(self, output: 'torch.Tensor', target: 'torch.Tensor') -> torch.Tensor: max_val = (-output).clamp(min=0) loss = output - output * target + max_val + ((-max_val).exp() + (-output - max_val).exp()).log() invprobs = F.logsigmoid(-output * (target * 2 - 1)) loss = (invprobs * self.gamma).exp() * loss return loss.mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
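A quick sanity check for the eager FocalLoss above (names are ours, assuming the class definition is in scope): the numerically stable expression in forward() is exactly binary cross-entropy with logits, so setting gamma=0 removes the focal modulation and the result must match F.binary_cross_entropy_with_logits.

import torch
import torch.nn.functional as F

output = torch.randn(4, 4, 4, 4)  # logits
target = torch.rand(4, 4, 4, 4)   # soft targets in [0, 1]
loss = FocalLoss(gamma=0)(output, target)
bce = F.binary_cross_entropy_with_logits(output, target)
print(torch.allclose(loss, bce))  # True: with gamma=0, (invprobs * 0).exp() == 1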
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp2 = tl.load(in_ptr1 + r0, None) tmp1 = -tmp0 tmp3 = 2.0 tmp4 = tmp2 * tmp3 tmp5 = 1.0 tmp6 = tmp4 - tmp5 tmp7 = tmp1 * tmp6 tmp8 = 0.0 tmp9 = triton_helpers.minimum(tmp8, tmp7) tmp10 = tl_math.abs(tmp7) tmp11 = -tmp10 tmp12 = tl_math.exp(tmp11) tmp13 = libdevice.log1p(tmp12) tmp14 = tmp9 - tmp13 tmp15 = tmp14 * tmp3 tmp16 = tl_math.exp(tmp15) tmp17 = tmp0 * tmp2 tmp18 = tmp0 - tmp17 tmp19 = triton_helpers.maximum(tmp1, tmp8) tmp20 = tmp18 + tmp19 tmp21 = -tmp19 tmp22 = tl_math.exp(tmp21) tmp23 = tmp1 - tmp19 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tl_math.log(tmp25) tmp27 = tmp20 + tmp26 tmp28 = tmp16 * tmp27 tmp29 = tl.broadcast_to(tmp28, [RBLOCK]) tmp31 = triton_helpers.promote_to_tensor(tl.sum(tmp29, 0)) tmp32 = 256.0 tmp33 = tmp31 / tmp32 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp33, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_0[ grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class FocalLossNew(nn.Module): def __init__(self, gamma: 'int'=2) ->None: super().__init__() self.gamma = gamma def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
repo_name: TylerYep/ml-toolkit
module_name: FocalLoss
synthetic: false
uuid: 18030
licenses: ["MIT"]
stars: 7
sha: 095bdce961133acc720f90b6d1bbb0a7becbfc9f
repo_link: https://github.com/TylerYep/ml-toolkit/tree/095bdce961133acc720f90b6d1bbb0a7becbfc9f
entry_point: Block_local
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/dd/cddxjos5zg6ivsqzt3remzqjikecvr6cwndrq7dry5npgdjitz4j.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => clone, var_mean # Graph fragment: # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%slice_3,), kwargs = {memory_format: torch.contiguous_format}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = 
xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 2.0 tmp4 = tmp2 / tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/tj/ctj67zwvxkn6zybcmdmofo3yjmjovtrogyocskd3j4p4eugdylw2.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add, add_1, clone, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%slice_3,), kwargs = {memory_format: torch.contiguous_format}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + 
(4*x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp3 - tmp1 tmp5 = tmp4 * tmp4 tmp7 = tmp6 - tmp1 tmp8 = tmp7 * tmp7 tmp9 = tmp5 + tmp8 tmp10 = 2.0 tmp11 = tmp9 / tmp10 tmp12 = 1e-06 tmp13 = tmp11 + tmp12 tmp14 = libdevice.rsqrt(tmp13) tmp15 = tmp2 * tmp14 tmp17 = tmp15 * tmp16 tmp19 = tmp17 + tmp18 tl.store(out_ptr0 + (x2), tmp19, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/z3/cz37ntixbjxct565k3tmqx4iahd43turtvka2marhfybzhrf2jcu.py # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 2 y1 = (yindex // 2) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (6*x2) + (24*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/h5/ch5sy4o2j3vwyseefpt2yxkawowpkmdk4n2u5lxqhjwzp7unzn23.py # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul => clone_2 # Graph fragment: # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = 
async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 2 y1 = (yindex // 2) y3 = yindex tmp0 = tl.load(in_ptr0 + (2 + y0 + (6*x2) + (24*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/tg/ctgv6okzvnqaciyhr4cpovdycprzyzxo6kdtjsaijmrdbs5g6ctz.py # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_1 => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', 
index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ms/cmsamobvtgpcngoz7ohygup463c2zjspohjzxxw56j3vfq25ym2t.py # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/z4/cz4hjyip5ujeaa3wem4y3h2dvknwct2ve4gasplhoo6uleznshiw.py # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul_1 => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 2 y1 = (yindex // 2) y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + (6*x2) + (24*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/tw/ctwzpgu7n3eaunk2l6on2neuc5bkji2irtk5rdquifrd5vjkkdtk.py # 
Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_1 => clone_5 # Graph fragment: # %clone_5 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_9,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_7 = async_compile.triton('triton_poi_fused_clone_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 2], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 2 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (8*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (2*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/iu/ciue4tt2ykeqhjg4vgkqmfe4rulzkal644lr5x2mzvo3xgiif4ij.py # Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_1 => clone_7, var_mean_1 # Graph fragment: # %clone_7 : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%slice_6,), kwargs = {memory_format: torch.contiguous_format}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone_7, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_native_layer_norm_8 = async_compile.triton('triton_poi_fused_native_layer_norm_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 
'*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_8(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 2.0 tmp4 = tmp2 / tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/lg/clghtobkzvsf4h3l4gkf23lpcxdywpanep4o5mwpxlm7frt7izaw.py # Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_1 => add_3, add_4, clone_7, mul_3, mul_4, rsqrt_1, sub_2, var_mean_1 # Graph fragment: # %clone_7 : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%slice_6,), kwargs = {memory_format: torch.contiguous_format}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone_7, [2]), kwargs = {correction: 0, keepdim: True}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-06), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_3,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone_7, %getitem_3), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_7), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_8), kwargs = {}) triton_poi_fused_native_layer_norm_9 = async_compile.triton('triton_poi_fused_native_layer_norm_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': 
{}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = (xindex // 2) x2 = xindex tmp0 = tl.load(in_ptr0 + (2 + x0 + (4*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp3 - tmp1 tmp5 = tmp4 * tmp4 tmp7 = tmp6 - tmp1 tmp8 = tmp7 * tmp7 tmp9 = tmp5 + tmp8 tmp10 = 2.0 tmp11 = tmp9 / tmp10 tmp12 = 1e-06 tmp13 = tmp11 + tmp12 tmp14 = libdevice.rsqrt(tmp13) tmp15 = tmp2 * tmp14 tmp17 = tmp15 * tmp16 tmp19 = tmp17 + tmp18 tl.store(out_ptr0 + (x2), tmp19, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/5h/c5hgsexytz2bqselgbvv7gky2yigpkrui42ogt6f2bzkszts3alb.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_5 => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute_6, %primals_11, %primals_12, [1], [1], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_10 = async_compile.triton('triton_poi_fused_convolution_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_10(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 2 y1 = (yindex // 2) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (2*x2) + (8*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ft/cftewgga6ugg2sa2oyowbcco63pzzevcf674fuh72bcb57pxxg2y.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%add_2, %permute_7], 2), kwargs = {}) triton_poi_fused_cat_11 = async_compile.triton('triton_poi_fused_cat_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x3 = (xindex // 4) x1 = (xindex // 4) % 4 x2 = (xindex // 16) x4 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 2, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((2*x3) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 4, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tl.load(in_ptr2 + (x1 + (4*((-2) + x0)) + (8*x2)), tmp10 & xmask, eviction_policy='evict_last', 
other=0.0) tmp14 = tl.load(in_ptr3 + ((-2) + x0), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tmp13 + tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp10, tmp15, tmp16) tmp18 = tl.where(tmp4, tmp9, tmp17) tl.store(out_ptr0 + (x4), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/53/c53tpy3d3a3wkuzfjotjgmcseh2jpa6kc76s6fvt3okgdl4qdnkh.py # Topologically Sorted Source Nodes: [x_7, layer_norm_2], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_2 => var_mean_2 # x_7 => add_5 # Graph fragment: # %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %cat), kwargs = {}) # %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_5, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_12 = async_compile.triton('triton_poi_fused_add_native_layer_norm_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_12(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = 
tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/f7/cf74fjgi4yq33u3yk5h6d4s6udeqfnlrywad7ski3a7jd5piyxyc.py # Topologically Sorted Source Nodes: [x_7, layer_norm_2], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_2 => add_6, add_7, mul_5, mul_6, rsqrt_2, sub_3 # x_7 => add_5 # Graph fragment: # %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %cat), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-06), kwargs = {}) # %rsqrt_2 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_6,), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %getitem_5), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %rsqrt_2), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %primals_13), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %primals_14), kwargs = {}) triton_poi_fused_add_native_layer_norm_13 = async_compile.triton('triton_poi_fused_add_native_layer_norm_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_13(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = 
tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/dj/cdj76lqk55ydu5emruibt7phyaolsxyh6m2jy7woirwaofsgtdez.py # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.gelu] # Source node to ATen node mapping: # x_9 => add_8, erf, mul_7, mul_8, mul_9 # Graph fragment: # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_15, 0.5), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_15, 0.7071067811865476), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_8,), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_7, %add_8), kwargs = {}) triton_poi_fused_gelu_14 = async_compile.triton('triton_poi_fused_gelu_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_gelu_14(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/3f/c3fjhuzqq4d4jzpq56rjjw2po2phiqwgkd4yuhx5hafczjkyoc6j.py # Topologically Sorted Source Nodes: [x_7, x_13], Original ATen: [aten.add] # Source node to ATen node mapping: # x_13 => add_9 # x_7 => add_5 # Graph fragment: # %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %cat), kwargs = {}) # %add_9 : [num_users=1] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %view_17), kwargs = {}) triton_poi_fused_add_15 = async_compile.triton('triton_poi_fused_add_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_15', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_15(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_out_ptr0 + (x2), xmask) tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (2, ), (1, )) assert_size_stride(primals_3, (2, ), (1, )) assert_size_stride(primals_4, (6, 2), (2, 1)) assert_size_stride(primals_5, (2, 2), (2, 1)) assert_size_stride(primals_6, (2, ), (1, )) assert_size_stride(primals_7, (2, ), (1, )) assert_size_stride(primals_8, (2, ), (1, )) assert_size_stride(primals_9, (2, 2), (2, 1)) assert_size_stride(primals_10, (2, ), (1, )) assert_size_stride(primals_11, (2, 2, 3), (6, 3, 1)) assert_size_stride(primals_12, (2, ), (1, )) assert_size_stride(primals_13, (4, ), (1, )) assert_size_stride(primals_14, (4, ), (1, )) assert_size_stride(primals_15, (16, 4), (4, 1)) assert_size_stride(primals_16, (16, ), (1, )) assert_size_stride(primals_17, (4, 16), (16, 1)) assert_size_stride(primals_18, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) 
triton_poi_fused_native_layer_norm_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_1, buf0, primals_2, primals_3, buf1, 32, grid=grid(32), stream=stream0) del primals_2 del primals_3 buf2 = empty_strided_cuda((16, 6), (6, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf1, (16, 2), (2, 1), 0), reinterpret_tensor(primals_4, (2, 6), (1, 2), 0), out=buf2) buf3 = empty_strided_cuda((4, 2, 4, 1), (8, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf2, buf3, 8, 4, grid=grid(8, 4), stream=stream0) buf4 = empty_strided_cuda((4, 2, 1, 4), (8, 4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf2, buf4, 8, 4, grid=grid(8, 4), stream=stream0) buf5 = empty_strided_cuda((8, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (8, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (8, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf5, buf6, 128, grid=grid(128), stream=stream0) buf7 = reinterpret_tensor(buf5, (4, 2, 4, 4), (32, 16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_5.run(buf6, buf7, 128, grid=grid(128), stream=stream0) del buf6 buf8 = empty_strided_cuda((4, 2, 4, 1), (8, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone] triton_poi_fused_clone_6.run(buf2, buf8, 8, 4, grid=grid(8, 4), stream=stream0) del buf2 buf9 = empty_strided_cuda((8, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf7, (8, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (8, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] triton_poi_fused_clone_7.run(buf9, buf10, 16, 2, grid=grid(16, 2), stream=stream0) buf11 = reinterpret_tensor(buf9, (16, 2), (2, 1), 0); del buf9 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf10, (16, 2), (2, 1), 0), reinterpret_tensor(primals_5, (2, 2), (1, 2), 0), out=buf11) buf12 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_8.run(primals_1, buf12, 16, grid=grid(16), stream=stream0) buf13 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm_1], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_9.run(primals_1, buf12, primals_7, primals_8, buf13, 32, grid=grid(32), stream=stream0) del primals_7 del primals_8 buf14 = empty_strided_cuda((16, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, reinterpret_tensor(buf13, 
(16, 2), (2, 1), 0), reinterpret_tensor(primals_9, (2, 2), (1, 2), 0), alpha=1, beta=1, out=buf14) del primals_10 buf15 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] triton_poi_fused_convolution_10.run(buf14, buf15, 8, 4, grid=grid(8, 4), stream=stream0) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] buf16 = extern_kernels.convolution(buf15, primals_11, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf16, (4, 2, 4), (8, 4, 1)) del buf15 buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] triton_poi_fused_cat_11.run(buf11, primals_6, buf16, primals_12, buf17, 64, grid=grid(64), stream=stream0) del buf11 del buf16 del primals_12 del primals_6 buf18 = buf12; del buf12 # reuse buf19 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [x_7, layer_norm_2], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_12.run(primals_1, buf17, buf18, buf19, 16, grid=grid(16), stream=stream0) buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_7, layer_norm_2], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_13.run(primals_1, buf17, buf18, buf19, primals_13, primals_14, buf20, 64, grid=grid(64), stream=stream0) del buf18 del buf19 del primals_14 buf21 = empty_strided_cuda((16, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.addmm] extern_kernels.addmm(primals_16, reinterpret_tensor(buf20, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf21) del primals_16 buf22 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.gelu] triton_poi_fused_gelu_14.run(buf21, buf22, 256, grid=grid(256), stream=stream0) buf23 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf22, (16, 16), (16, 1), 0), reinterpret_tensor(primals_17, (16, 4), (1, 16), 0), out=buf23) buf24 = reinterpret_tensor(buf23, (4, 4, 4), (16, 4, 1), 0); del buf23 # reuse # Topologically Sorted Source Nodes: [x_7, x_13], Original ATen: [aten.add] triton_poi_fused_add_15.run(buf24, primals_1, buf17, primals_18, 64, grid=grid(64), stream=stream0) del primals_18 return (buf24, primals_1, primals_11, primals_13, reinterpret_tensor(buf1, (16, 2), (2, 1), 0), buf7, reinterpret_tensor(buf10, (16, 2), (2, 1), 0), reinterpret_tensor(buf13, (16, 2), (2, 1), 0), reinterpret_tensor(buf14, (4, 2, 4), (8, 1, 2), 0), buf17, reinterpret_tensor(buf20, (16, 4), (4, 1), 0), buf21, reinterpret_tensor(buf22, (16, 16), (16, 1), 0), primals_17, primals_15, primals_9, primals_5, reinterpret_tensor(buf8, (8, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (8, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (8, 4, 1), (4, 1, 4), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = 
rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((6, 2), (2, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((2, 2), (2, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((2, 2), (2, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((2, 2, 3), (6, 3, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
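The first two fused kernels invoked above implement the LayerNorm that the eager module applies to only the global half of the channels. Below is a minimal eager-mode sketch of that computation, not the author's code: it assumes the get_init_inputs configuration (dim=4, local_ratio=0.5, so the attention branch sees channels [:2]), and the tolerance is an arbitrary choice.

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4)
g = x[:, :, :2]                                   # global half: 2 channels
mean = g.mean(dim=-1, keepdim=True)               # kernel 0: per-token mean
var = g.var(dim=-1, unbiased=False, keepdim=True)
normed = (g - mean) * torch.rsqrt(var + 1e-06)    # kernel 1, before the affine
assert torch.allclose(normed, F.layer_norm(g, (2,), eps=1e-06), atol=1e-05)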
import math import torch import numpy as np from torch import nn from torch.nn.modules.utils import _pair from functools import partial import torch.utils.data import torch.nn.parallel from torch import optim as optim def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * (x.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x. device) random_tensor.floor_() output = x.div(keep_prob) * random_tensor return output class DCNv2(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, deformable_groups=1): super(DCNv2, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = _pair(kernel_size) self.stride = _pair(stride) self.padding = _pair(padding) self.dilation = _pair(dilation) self.deformable_groups = deformable_groups self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels, *self.kernel_size)) self.bias = nn.Parameter(torch.Tensor(out_channels)) self.reset_parameters() def reset_parameters(self): n = self.in_channels for k in self.kernel_size: n *= k stdv = 1.0 / math.sqrt(n) self.weight.data.uniform_(-stdv, stdv) self.bias.data.zero_() def forward(self, input, offset, mask): assert 2 * self.deformable_groups * self.kernel_size[0 ] * self.kernel_size[1] == offset.shape[1] assert self.deformable_groups * self.kernel_size[0] * self.kernel_size[ 1] == mask.shape[1] return dcn_v2_conv(input, offset, mask, self.weight, self.bias, self.stride, self.padding, self.dilation, self.deformable_groups) class DCN(DCNv2): def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, deformable_groups=1): super(DCN, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, deformable_groups) channels_ = self.deformable_groups * 3 * self.kernel_size[0 ] * self.kernel_size[1] self.conv_offset_mask = nn.Conv2d(self.in_channels, channels_, kernel_size=self.kernel_size, stride=self.stride, padding=self. 
padding, bias=True) self.init_offset() def init_offset(self): self.conv_offset_mask.weight.data.zero_() self.conv_offset_mask.bias.data.zero_() def forward(self, input): out = self.conv_offset_mask(input) o1, o2, mask = torch.chunk(out, 3, dim=1) offset = torch.cat((o1, o2), dim=1) mask = torch.sigmoid(mask) return dcn_v2_conv(input, offset, mask, self.weight, self.bias, self.stride, self.padding, self.dilation, self.deformable_groups) class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn = q @ k.transpose(-2, -1) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class LocalAttention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0, local_ks=3, length=196): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) mask = torch.ones(length, length) for i in range(length): for j in range(i - local_ks // 2, i + local_ks // 2 + 1, 1): j = min(max(0, j), length - 1) mask[i, j] = 0 mask = mask.unsqueeze(0).unsqueeze(1) self.mask = nn.Parameter(mask, requires_grad=False) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn = q @ k.transpose(-2, -1) * self.scale attn = attn.masked_fill_(self.mask.bool(), -np.inf) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class LocalBranch(nn.Module): def __init__(self, dim, local_type='conv', local_ks=3, length=196, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0): super().__init__() self.local_type = local_type if local_type == 'conv': self.linear = nn.Linear(dim, dim) self.local = nn.Conv1d(dim, dim, kernel_size=local_ks, padding= local_ks // 2, padding_mode='zeros', groups=1) elif local_type == 'dcn': self.linear = nn.Linear(dim, dim) self.local = DCN(dim, dim, kernel_size=(local_ks, 1), stride=1, padding=(local_ks // 2, 0), deformable_groups=2) elif local_type == 'attn': self.local = LocalAttention(dim, num_heads=num_heads, qkv_bias= qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop =proj_drop, 
local_ks=local_ks, length=length) else: self.local = nn.Identity() def forward(self, x): if self.local_type in ['conv']: x = self.linear(x) x = x.permute(0, 2, 1) x = self.local(x) x = x.permute(0, 2, 1) return x elif self.local_type == 'dcn': x = self.linear(x) x = x.permute(0, 2, 1).unsqueeze(3).contiguous() x = self.local(x) x = x.squeeze(3).permute(0, 2, 1) return x elif self.local_type == 'attn': x = self.local(x) return x else: x = self.local(x) return x class DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). """ def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) class Block_local(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn .GELU, norm_layer=partial(nn.LayerNorm, eps=1e-06), local_type= 'conv', local_ks=3, length=196, local_ratio=0.5, ffn_type='base'): super().__init__() local_dim = int(dim * local_ratio) self.global_dim = dim - local_dim div = 2 self.num_heads = num_heads // div self.norm1 = norm_layer(self.global_dim) self.norm1_local = norm_layer(local_dim) self.attn = Attention(self.global_dim, num_heads=self.num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) self.local = LocalBranch(local_dim, local_type=local_type, local_ks =local_ks, length=length, num_heads=self.num_heads, qkv_bias= qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path ) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) if ffn_type == 'base': MLP = Mlp else: raise Exception('invalid ffn_type: {}'.format(ffn_type)) self.mlp = MLP(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self, x): x_attn = self.drop_path(self.attn(self.norm1(x[:, :, :self. global_dim]))) x_local = self.drop_path(self.local(self.norm1_local(x[:, :, self. global_dim:]))) x = x + torch.cat([x_attn, x_local], dim=2) x = x + self.drop_path(self.mlp(self.norm2(x))) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4, 'num_heads': 4}]
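A hedged usage sketch (assuming the classes and helpers above are in scope): instantiate the block with the arguments that get_init_inputs supplies and run it on the get_inputs tensor. Both residual adds preserve the input shape.

block = Block_local(dim=4, num_heads=4)  # local_ratio=0.5: 2 global + 2 local channels
x, = get_inputs()                        # [B, N, C] = [4, 4, 4]
out = block(x)
assert out.shape == x.shape              # residual block keeps [B, N, C]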
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import numpy as np from torch import nn from torch.nn.modules.utils import _pair from functools import partial import torch.utils.data import torch.nn.parallel from torch import optim as optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 2.0 tmp4 = tmp2 / tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp3 - tmp1 tmp5 = tmp4 * tmp4 tmp7 = tmp6 - tmp1 tmp8 = tmp7 * tmp7 tmp9 = tmp5 + tmp8 tmp10 = 2.0 tmp11 = tmp9 / tmp10 tmp12 = 1e-06 tmp13 = tmp11 + tmp12 tmp14 = libdevice.rsqrt(tmp13) tmp15 = tmp2 * tmp14 tmp17 = tmp15 * tmp16 tmp19 = tmp17 + tmp18 tl.store(out_ptr0 + x2, tmp19, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 8 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 2 y1 = yindex // 2 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 6 * x2 + 24 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 8 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 2 y1 = yindex // 2 y3 = yindex tmp0 = tl.load(in_ptr0 + (2 + y0 + 6 * x2 + 24 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 8 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 2 y1 = yindex // 2 y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + 6 * x2 + 24 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 2 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 8 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 2 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_native_layer_norm_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 2.0 tmp4 = tmp2 / tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp3 - tmp1 tmp5 = tmp4 * tmp4 tmp7 = tmp6 - tmp1 tmp8 = tmp7 * tmp7 tmp9 = tmp5 + tmp8 tmp10 = 2.0 tmp11 = tmp9 / tmp10 tmp12 = 1e-06 tmp13 = tmp11 + tmp12 tmp14 = libdevice.rsqrt(tmp13) tmp15 = tmp2 * tmp14 tmp17 = tmp15 * tmp16 tmp19 = tmp17 + tmp18 tl.store(out_ptr0 + x2, tmp19, xmask) @triton.jit def triton_poi_fused_convolution_10(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 8 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 2 y1 = yindex // 2 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 2 * x2 + 8 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_cat_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x3 = xindex // 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x4 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 2, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (2 * x3 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 4, tl.int64) tmp13 = tl.load(in_ptr2 + (x1 + 4 * (-2 + x0) + 8 * x2), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.load(in_ptr3 + (-2 + x0), tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tmp13 + tmp14 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = 
tl.where(tmp10, tmp15, tmp16) tmp18 = tl.where(tmp4, tmp9, tmp17) tl.store(out_ptr0 + x4, tmp18, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_12(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_13(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-06 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_gelu_14(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_15(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_out_ptr0 + x2, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 ) = args args.clear() 
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (2,), (1,)) assert_size_stride(primals_4, (6, 2), (2, 1)) assert_size_stride(primals_5, (2, 2), (2, 1)) assert_size_stride(primals_6, (2,), (1,)) assert_size_stride(primals_7, (2,), (1,)) assert_size_stride(primals_8, (2,), (1,)) assert_size_stride(primals_9, (2, 2), (2, 1)) assert_size_stride(primals_10, (2,), (1,)) assert_size_stride(primals_11, (2, 2, 3), (6, 3, 1)) assert_size_stride(primals_12, (2,), (1,)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (16, 4), (4, 1)) assert_size_stride(primals_16, (16,), (1,)) assert_size_stride(primals_17, (4, 16), (16, 1)) assert_size_stride(primals_18, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(32)](primals_1, buf0, primals_2, primals_3, buf1, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_2 del primals_3 buf2 = empty_strided_cuda((16, 6), (6, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (16, 2), (2, 1), 0), reinterpret_tensor(primals_4, (2, 6), (1, 2), 0), out=buf2) buf3 = empty_strided_cuda((4, 2, 4, 1), (8, 4, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(8, 4)](buf2, buf3, 8, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 2, 1, 4), (8, 4, 4, 1), torch.float32) triton_poi_fused_clone_3[grid(8, 4)](buf2, buf4, 8, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((8, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (8, 4, 1), (4, 1, 0), 0 ), reinterpret_tensor(buf4, (8, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(128)](buf5, buf6, 128, XBLOCK=128, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 2, 4, 4), (32, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_5[grid(128)](buf6, buf7, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf6 buf8 = empty_strided_cuda((4, 2, 4, 1), (8, 4, 1, 1), torch.float32) triton_poi_fused_clone_6[grid(8, 4)](buf2, buf8, 8, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del buf2 buf9 = empty_strided_cuda((8, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf7, (8, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (8, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) triton_poi_fused_clone_7[grid(16, 2)](buf9, buf10, 16, 2, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 2), (2, 1), 0) del buf9 extern_kernels.mm(reinterpret_tensor(buf10, (16, 2), (2, 1), 0), reinterpret_tensor(primals_5, (2, 2), (1, 2), 0), out=buf11) buf12 = buf0 del buf0 triton_poi_fused_native_layer_norm_8[grid(16)](primals_1, buf12, 16, XBLOCK=16, num_warps=1, num_stages=1) buf13 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) triton_poi_fused_native_layer_norm_9[grid(32)](primals_1, buf12, primals_7, primals_8, buf13, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_7 del primals_8 buf14 = empty_strided_cuda((16, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_10, reinterpret_tensor(buf13, 
(16, 2), (2, 1), 0), reinterpret_tensor(primals_9, (2, 2), (1, 2), 0), alpha=1, beta=1, out=buf14) del primals_10 buf15 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.float32) triton_poi_fused_convolution_10[grid(8, 4)](buf14, buf15, 8, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) buf16 = extern_kernels.convolution(buf15, primals_11, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf16, (4, 2, 4), (8, 4, 1)) del buf15 buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_cat_11[grid(64)](buf11, primals_6, buf16, primals_12, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf11 del buf16 del primals_12 del primals_6 buf18 = buf12 del buf12 buf19 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_12[grid(16)](primals_1, buf17, buf18, buf19, 16, XBLOCK=16, num_warps=1, num_stages=1) buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_13[grid(64)](primals_1, buf17, buf18, buf19, primals_13, primals_14, buf20, 64, XBLOCK= 64, num_warps=1, num_stages=1) del buf18 del buf19 del primals_14 buf21 = empty_strided_cuda((16, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_16, reinterpret_tensor(buf20, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf21) del primals_16 buf22 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_gelu_14[grid(256)](buf21, buf22, 256, XBLOCK=256, num_warps=4, num_stages=1) buf23 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf22, (16, 16), (16, 1), 0), reinterpret_tensor(primals_17, (16, 4), (1, 16), 0), out=buf23) buf24 = reinterpret_tensor(buf23, (4, 4, 4), (16, 4, 1), 0) del buf23 triton_poi_fused_add_15[grid(64)](buf24, primals_1, buf17, primals_18, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_18 return buf24, primals_1, primals_11, primals_13, reinterpret_tensor(buf1, (16, 2), (2, 1), 0), buf7, reinterpret_tensor(buf10, (16, 2), (2, 1), 0 ), reinterpret_tensor(buf13, (16, 2), (2, 1), 0), reinterpret_tensor( buf14, (4, 2, 4), (8, 1, 2), 0), buf17, reinterpret_tensor(buf20, ( 16, 4), (4, 1), 0), buf21, reinterpret_tensor(buf22, (16, 16), (16, 1), 0 ), primals_17, primals_15, primals_9, primals_5, reinterpret_tensor( buf8, (8, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (8, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (8, 4, 1), (4, 1, 4), 0 ), primals_4 def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * (x.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x. 
device) random_tensor.floor_() output = x.div(keep_prob) * random_tensor return output class DCNv2(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, deformable_groups=1): super(DCNv2, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = _pair(kernel_size) self.stride = _pair(stride) self.padding = _pair(padding) self.dilation = _pair(dilation) self.deformable_groups = deformable_groups self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels, *self.kernel_size)) self.bias = nn.Parameter(torch.Tensor(out_channels)) self.reset_parameters() def reset_parameters(self): n = self.in_channels for k in self.kernel_size: n *= k stdv = 1.0 / math.sqrt(n) self.weight.data.uniform_(-stdv, stdv) self.bias.data.zero_() def forward(self, input, offset, mask): assert 2 * self.deformable_groups * self.kernel_size[0 ] * self.kernel_size[1] == offset.shape[1] assert self.deformable_groups * self.kernel_size[0] * self.kernel_size[ 1] == mask.shape[1] return dcn_v2_conv(input, offset, mask, self.weight, self.bias, self.stride, self.padding, self.dilation, self.deformable_groups) class DCN(DCNv2): def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, deformable_groups=1): super(DCN, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, deformable_groups) channels_ = self.deformable_groups * 3 * self.kernel_size[0 ] * self.kernel_size[1] self.conv_offset_mask = nn.Conv2d(self.in_channels, channels_, kernel_size=self.kernel_size, stride=self.stride, padding=self. padding, bias=True) self.init_offset() def init_offset(self): self.conv_offset_mask.weight.data.zero_() self.conv_offset_mask.bias.data.zero_() def forward(self, input): out = self.conv_offset_mask(input) o1, o2, mask = torch.chunk(out, 3, dim=1) offset = torch.cat((o1, o2), dim=1) mask = torch.sigmoid(mask) return dcn_v2_conv(input, offset, mask, self.weight, self.bias, self.stride, self.padding, self.dilation, self.deformable_groups) class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn = q @ k.transpose(-2, -1) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class LocalAttention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0, local_ks=3, length=196): super().__init__() self.num_heads = num_heads head_dim 
= dim // num_heads self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) mask = torch.ones(length, length) for i in range(length): for j in range(i - local_ks // 2, i + local_ks // 2 + 1, 1): j = min(max(0, j), length - 1) mask[i, j] = 0 mask = mask.unsqueeze(0).unsqueeze(1) self.mask = nn.Parameter(mask, requires_grad=False) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] attn = q @ k.transpose(-2, -1) * self.scale attn = attn.masked_fill_(self.mask.bool(), -np.inf) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class LocalBranch(nn.Module): def __init__(self, dim, local_type='conv', local_ks=3, length=196, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0): super().__init__() self.local_type = local_type if local_type == 'conv': self.linear = nn.Linear(dim, dim) self.local = nn.Conv1d(dim, dim, kernel_size=local_ks, padding= local_ks // 2, padding_mode='zeros', groups=1) elif local_type == 'dcn': self.linear = nn.Linear(dim, dim) self.local = DCN(dim, dim, kernel_size=(local_ks, 1), stride=1, padding=(local_ks // 2, 0), deformable_groups=2) elif local_type == 'attn': self.local = LocalAttention(dim, num_heads=num_heads, qkv_bias= qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop =proj_drop, local_ks=local_ks, length=length) else: self.local = nn.Identity() def forward(self, x): if self.local_type in ['conv']: x = self.linear(x) x = x.permute(0, 2, 1) x = self.local(x) x = x.permute(0, 2, 1) return x elif self.local_type == 'dcn': x = self.linear(x) x = x.permute(0, 2, 1).unsqueeze(3).contiguous() x = self.local(x) x = x.squeeze(3).permute(0, 2, 1) return x elif self.local_type == 'attn': x = self.local(x) return x else: x = self.local(x) return x class DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
""" def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) class Block_localNew(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn .GELU, norm_layer=partial(nn.LayerNorm, eps=1e-06), local_type= 'conv', local_ks=3, length=196, local_ratio=0.5, ffn_type='base'): super().__init__() local_dim = int(dim * local_ratio) self.global_dim = dim - local_dim div = 2 self.num_heads = num_heads // div self.norm1 = norm_layer(self.global_dim) self.norm1_local = norm_layer(local_dim) self.attn = Attention(self.global_dim, num_heads=self.num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) self.local = LocalBranch(local_dim, local_type=local_type, local_ks =local_ks, length=length, num_heads=self.num_heads, qkv_bias= qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path ) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) if ffn_type == 'base': MLP = Mlp else: raise Exception('invalid ffn_type: {}'.format(ffn_type)) self.mlp = MLP(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self, input_0): primals_2 = self.norm1.weight primals_3 = self.norm1.bias primals_6 = self.norm1_local.weight primals_7 = self.norm1_local.bias primals_4 = self.attn.qkv.weight primals_5 = self.attn.proj.weight primals_8 = self.attn.proj.bias primals_9 = self.local.linear.weight primals_10 = self.local.linear.bias primals_11 = self.local.local.weight primals_12 = self.local.local.bias primals_13 = self.norm2.weight primals_14 = self.norm2.bias primals_15 = self.mlp.fc1.weight primals_16 = self.mlp.fc1.bias primals_17 = self.mlp.fc2.weight primals_18 = self.mlp.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18]) return output[0]
TencentYoutuResearch/BaseArchitecture-EAT
Block_local
false
18031
[ "BSD-3-Clause" ]
9
b916738ef9b1314f5fdad780a0839cb4e010a208
https://github.com/TencentYoutuResearch/BaseArchitecture-EAT/tree/b916738ef9b1314f5fdad780a0839cb4e010a208
BatchMLP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/6n/c6nha2q3zyxvtg2jmtvcuoiccmtryqlajcha5a7wl5jnmf6wzrgu.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) 
tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + (x0), tmp2, xmask) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, buf3, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_4 return (reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(buf1, (16, 4), (4, 1), 0), primals_3, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
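Per element, the fused kernel above computes both the ReLU activation and the boolean mask that threshold_backward later uses to zero upstream gradients. A minimal eager sketch of that pair (the shape follows get_inputs; not the author's code):

import torch

x = torch.randn(4, 4, 4)
y = torch.relu(x)          # in_out_ptr0 is overwritten with max(0, x)
mask = y <= 0.0            # out_ptr0: where the backward pass drops grads
assert mask.equal(x <= 0.0)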
import torch from torch import nn class NPBlockRelu2d(nn.Module): """Block for Neural Processes.""" def __init__(self, in_channels, out_channels, dropout=0, batchnorm= False, bias=False): super().__init__() self.linear = nn.Linear(in_channels, out_channels, bias=bias) self.act = nn.ReLU() self.dropout = nn.Dropout2d(dropout) self.norm = nn.BatchNorm2d(out_channels) if batchnorm else False def forward(self, x): x = self.act(self.linear(x)) x = x.permute(0, 2, 1)[:, :, :, None] if self.norm: x = self.norm(x) x = self.dropout(x) return x[:, :, :, 0].permute(0, 2, 1) class BatchMLP(nn.Module): """Apply MLP to the final axis of a 3D tensor (reusing already defined MLPs). Args: input: input tensor of shape [B,n,d_in]. output_sizes: An iterable containing the output sizes of the MLP as defined in `basic.Linear`. Returns: tensor of shape [B,n,d_out] where d_out=output_size """ def __init__(self, input_size, output_size, num_layers=2, dropout=0, batchnorm=False): super().__init__() self.input_size = input_size self.output_size = output_size self.num_layers = num_layers self.initial = NPBlockRelu2d(input_size, output_size, dropout= dropout, batchnorm=batchnorm) self.encoder = nn.Sequential(*[NPBlockRelu2d(output_size, output_size, dropout=dropout, batchnorm=batchnorm) for _ in range(num_layers - 2)]) self.final = nn.Linear(output_size, output_size) def forward(self, x): x = self.initial(x) x = self.encoder(x) return self.final(x) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'output_size': 4}]
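A hedged usage sketch (assuming the definitions above are in scope). With the default num_layers=2 the encoder Sequential is empty, so the module reduces to the initial block followed by the final linear, applied independently along the last axis:

mlp = BatchMLP(input_size=4, output_size=4)
x, = get_inputs()              # [B, n, d_in] = [4, 4, 4]
out = mlp(x)
assert out.shape == (4, 4, 4)  # d_out == output_size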
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, reinterpret_tensor(buf1, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_4 return reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (16, 4), (4, 1), 0), primals_3, buf3 class NPBlockRelu2d(nn.Module): """Block for Neural Processes.""" def __init__(self, in_channels, out_channels, dropout=0, batchnorm= False, bias=False): super().__init__() self.linear = nn.Linear(in_channels, out_channels, bias=bias) self.act = nn.ReLU() self.dropout = nn.Dropout2d(dropout) self.norm = nn.BatchNorm2d(out_channels) if batchnorm else False def forward(self, x): x = self.act(self.linear(x)) x = x.permute(0, 2, 1)[:, :, :, None] if self.norm: x = self.norm(x) x = self.dropout(x) return x[:, :, :, 0].permute(0, 2, 1) class BatchMLPNew(nn.Module): """Apply MLP to the final axis of a 3D tensor (reusing already defined MLPs). Args: input: input tensor of shape [B,n,d_in]. output_sizes: An iterable containing the output sizes of the MLP as defined in `basic.Linear`. 
Returns: tensor of shape [B,n,d_out] where d_out=output_size """ def __init__(self, input_size, output_size, num_layers=2, dropout=0, batchnorm=False): super().__init__() self.input_size = input_size self.output_size = output_size self.num_layers = num_layers self.initial = NPBlockRelu2d(input_size, output_size, dropout= dropout, batchnorm=batchnorm) self.encoder = nn.Sequential(*[NPBlockRelu2d(output_size, output_size, dropout=dropout, batchnorm=batchnorm) for _ in range(num_layers - 2)]) self.final = nn.Linear(output_size, output_size) def forward(self, input_0): primals_1 = self.initial.linear.weight primals_3 = self.final.weight primals_4 = self.final.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
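Note that call() returns the forward output first, followed by tensors saved for the backward graph (reinterpreted activations and the boolean ReLU mask buf3); the module wrapper keeps only output[0]. A hedged inspection sketch, CUDA required, assuming the definitions above are in scope:

m = BatchMLPNew(4, 4).cuda()
x = torch.rand(4, 4, 4, device='cuda')
# call() empties the list it is given, so pass a fresh one
outs = call([m.initial.linear.weight, x, m.final.weight, m.final.bias])
assert outs[0].shape == (4, 4, 4)     # the forward result
assert outs[-1].dtype == torch.bool   # buf3: saved ReLU mask for backward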
VersElectronics/Neural-Processes
BatchMLP
false
18032
[ "MIT" ]
5
6eb7552a0d1c489189d6dd0f83704dcdbeaed24b
https://github.com/VersElectronics/Neural-Processes/tree/6eb7552a0d1c489189d6dd0f83704dcdbeaed24b
DropConnect
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/b5/cb5jr56ri7msmoyfncpvr6xndnnlmaws7kj4yseutcuccfdbrqhc.py # Topologically Sorted Source Nodes: [truediv, random_tensor, binary_tensor, output], Original ATen: [aten.div, aten.add, aten.floor, aten.mul] # Source node to ATen node mapping: # binary_tensor => floor # output => mul # random_tensor => add # truediv => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, -3), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%rand, -3), kwargs = {}) # %floor : [num_users=1] = call_function[target=torch.ops.aten.floor.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %floor), kwargs = {}) triton_poi_fused_add_div_floor_mul_0 = async_compile.triton('triton_poi_fused_add_div_floor_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_floor_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_floor_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 16) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp1 = -0.3333333333333333 tmp2 = tmp0 * tmp1 tmp4 = -3.0 tmp5 = tmp3 + tmp4 tmp6 = libdevice.floor(tmp5) tmp7 = tmp2 * tmp6 tl.store(out_ptr0 + (x2), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [rand], Original ATen: [aten.rand] buf0 = torch.ops.aten.rand.default([4, 4, 1, 1], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [truediv, random_tensor, binary_tensor, output], Original ATen: [aten.div, aten.add, aten.floor, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_div_floor_mul_0.run(arg0_1, buf1, buf2, 256, grid=grid(256), stream=stream0) del arg0_1 del buf1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
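The two constants baked into the kernel above fall directly out of the p=4 that get_init_inputs supplies: keep_prob = 1 - p = -3, so the kernel multiplies by 1/keep_prob = -1/3 (tmp1) and adds keep_prob = -3 (tmp4) before the floor. A sketch of that arithmetic; note that with this degenerate rate floor(-3 + U[0,1)) is always -3, which makes the whole op an identity map:

import torch

p = 4
keep_prob = 1 - p                                 # -3
x = torch.rand(4, 4, 4, 4)
r = torch.rand(4, 4, 1, 1)                        # one draw per (batch, channel)
out = x / keep_prob * torch.floor(keep_prob + r)  # floor(-3 + r) == -3 always
assert torch.allclose(out, x, atol=1e-06)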
import torch


class DropConnect(torch.nn.Module):

    def __init__(self, p):
        super(DropConnect, self).__init__()
        self.p = p

    def forward(self, inputs):
        batch_size = inputs.shape[0]
        channel_size = inputs.shape[1]
        keep_prob = 1 - self.p
        random_tensor = keep_prob
        random_tensor += torch.rand([batch_size, channel_size, 1, 1],
            dtype=inputs.dtype, device=inputs.device)
        binary_tensor = torch.floor(random_tensor)
        output = inputs / keep_prob * binary_tensor
        return output


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'p': 4}]
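A hedged illustration with a conventional rate (assumption: p=0.2 rather than the p=4 above): each (batch, channel) slice is zeroed or rescaled by 1/keep_prob as a unit, which is what distinguishes DropConnect from elementwise dropout.

torch.manual_seed(0)
drop = DropConnect(p=0.2)
y = drop(torch.ones(4, 4, 4, 4))
slices = y.flatten(2)                        # [B, C, H*W]: one scale per slice
assert torch.all(slices.std(dim=-1) == 0)    # constant within every slice
assert torch.all((y == 0) | (y > 1))         # dropped, or scaled by 1/0.8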
import torch from torch import device import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_floor_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 16 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp1 = -0.3333333333333333 tmp2 = tmp0 * tmp1 tmp4 = -3.0 tmp5 = tmp3 + tmp4 tmp6 = libdevice.floor(tmp5) tmp7 = tmp2 * tmp6 tl.store(out_ptr0 + x2, tmp7, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten.rand.default([4, 4, 1, 1], dtype=torch. float32, device=device(type='cuda', index=0), pin_memory=False) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_floor_mul_0[grid(256)](arg0_1, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del buf1 return buf2, class DropConnectNew(torch.nn.Module): def __init__(self, p): super(DropConnectNew, self).__init__() self.p = p def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
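Because p=4 makes the binary tensor the constant -3 (see the note after the kernel listing above), the eager and compiled paths agree without fixing the RNG seed. A hedged check, CUDA required, assuming both classes above are in scope:

x = torch.rand(4, 4, 4, 4, device='cuda')
a = DropConnect(p=4)(x)
b = DropConnectNew(p=4)(x)
assert torch.allclose(a, b, atol=1e-06)
assert torch.allclose(a, x, atol=1e-06)   # degenerate rate: identity map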
VascoLopes/GEA
DropConnect
false
18033
[ "MIT" ]
4
ab80dbb9851dfc215102e5222e8d5f70e855dd15
https://github.com/VascoLopes/GEA/tree/ab80dbb9851dfc215102e5222e8d5f70e855dd15
Block_cls
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/pd/cpdjcxzcbozbd2no6hmfxw5cyam5nu3s7bghjyjpllz6ihf4t7o2.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : 
tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-06 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/wk/cwkmrcckbwmqrnn75bcrj6x53nm4p3l2vitrgxgtbfaftyuxfsme.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/3a/c3aaucsolrfdjphevn4qawbbrmr3jqka2dfrruxbjgx42g3fnwu6.py # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/qm/cqm6eydvnbwdhiz3t2cvqh3aav3qx6gjm44e6xiiuh77uhbahl5n.py # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] # Source node to ATen node 
mapping: # matmul => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (8*x2) + (32*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/25/c25jehyb2kwed7xq7lc3i4pphzroyhb7qjcs55wtc2k45bzxw6kz.py # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_1 => exp # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_8, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 1.0), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/vm/cvmxc36ua3qvi5sfzotzrqa4y5ilb4k4ptxzsfqpgxth35vrel6k.py # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 
'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/eo/ceomsfy7as3ml2u4dirjh4z7qwlqo5kzlzamwy4rgf44iwhqihvp.py # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # matmul_1 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + (8*x2) + 
(32*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ac/cacm2ntqvhwvi57kc7o3xyfrih7vyzykapk5tukucpcqntt3w7qa.py # Topologically Sorted Source Nodes: [x_1, xq, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_1 => var_mean_1 # x_1 => add_2 # xq => add_3 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_14, %primals_8), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_7 = async_compile.triton('triton_poi_fused_add_native_layer_norm_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (0)) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (1)) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr2 + (2)) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + (3)) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp4 = 
tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp24 = tmp21 + tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + (x0), tmp28, xmask) tl.store(out_ptr1 + (x0), tmp40, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/bm/cbmb3vur6ljznzso3onopwciqrqarqwzdhovbko42e6yszqbnb2q.py # Topologically Sorted Source Nodes: [x_1, xq, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm_1 => add_4, add_5, mul_3, mul_4, rsqrt_1, sub_2 # x_1 => add_2 # xq => add_3 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_14, %primals_8), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-06), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_9), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_10), kwargs = {}) triton_poi_fused_add_native_layer_norm_8 = async_compile.triton('triton_poi_fused_add_native_layer_norm_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-06 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/7k/c7kw7kxz3vrpdbf32n46fjck2yjixdldovgiamlsf6pkrslqg337.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.gelu] # Source node to ATen node mapping: # x_4 => add_6, erf, mul_5, mul_6, mul_7 # Graph fragment: # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_16, 0.5), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_16, 0.7071067811865476), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_6,), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %add_6), kwargs = {}) triton_poi_fused_gelu_9 = async_compile.triton('triton_poi_fused_gelu_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_gelu_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 
= 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/3k/c3khjmzmjg2bcd3frznli5vh3rv5b57cxqqi4nq7yrktnefsiqqg.py # Topologically Sorted Source Nodes: [x_1, xq, xq_1], Original ATen: [aten.add] # Source node to ATen node mapping: # x_1 => add_2 # xq => add_3 # xq_1 => add_7 # Graph fragment: # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_14, %primals_8), kwargs = {}) # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %add_2), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %view_18), kwargs = {}) triton_poi_fused_add_10 = async_compile.triton('triton_poi_fused_add_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_out_ptr0 + (x2), xmask) tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp4 + tmp7 tl.store(in_out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (8, 4), (4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, 4), 
(4, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (16, 4), (4, 1)) assert_size_stride(primals_12, (16, ), (1, )) assert_size_stride(primals_13, (4, 16), (16, 1)) assert_size_stride(primals_14, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 16, grid=grid(16), stream=stream0) buf2 = empty_strided_cuda((16, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 8), (1, 4), 0), out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf3, 64, grid=grid(64), stream=stream0) del primals_1 del primals_2 buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf4, buf5, 16, 4, grid=grid(16, 4), stream=stream0) buf6 = reinterpret_tensor(buf4, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf2, buf6, 16, 4, grid=grid(16, 4), stream=stream0) buf7 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf6, (16, 1, 4), (4, 0, 1), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf7, buf8, 256, grid=grid(256), stream=stream0) buf9 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_5.run(buf8, buf9, 256, grid=grid(256), stream=stream0) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone] triton_poi_fused_clone_6.run(buf2, buf10, 16, 4, grid=grid(16, 4), stream=stream0) del buf2 buf11 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0) buf13 = reinterpret_tensor(buf11, 
(16, 4), (4, 1), 0); del buf11 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf13) buf14 = buf1; del buf1 # reuse buf15 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x_1, xq, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_7.run(primals_3, buf13, primals_8, buf14, buf15, 16, grid=grid(16), stream=stream0) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1, xq, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_8.run(primals_3, buf13, primals_8, buf14, buf15, primals_9, primals_10, buf16, 64, grid=grid(64), stream=stream0) del buf14 del buf15 del primals_10 buf17 = reinterpret_tensor(buf8, (16, 16), (16, 1), 0); del buf8 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_12, reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf17) del primals_12 buf18 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.gelu] triton_poi_fused_gelu_9.run(buf17, buf18, 256, grid=grid(256), stream=stream0) buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf18, (16, 16), (16, 1), 0), reinterpret_tensor(primals_13, (16, 4), (1, 16), 0), out=buf19) buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0); del buf19 # reuse # Topologically Sorted Source Nodes: [x_1, xq, xq_1], Original ATen: [aten.add] triton_poi_fused_add_10.run(buf20, primals_3, buf13, primals_8, primals_14, 64, grid=grid(64), stream=stream0) del primals_14 return (buf20, primals_3, primals_8, primals_9, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(buf3, (16, 4), (4, 1), 0), buf9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, reinterpret_tensor(buf16, (16, 4), (4, 1), 0), buf17, reinterpret_tensor(buf18, (16, 16), (16, 1), 0), primals_13, primals_11, primals_7, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 4), 0), primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((16, ), (1, ), 
device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
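One detail worth calling out in the generated kernels above: triton_poi_fused__softmax_4 multiplies the attention logits by 1.0 before the max-subtraction. That is not dead code inductor invented; it is the CrossAttention scale traced at dim=4, num_heads=4 (see the module below), where each head has width 1:

dim, num_heads = 4, 4
head_dim = dim // num_heads        # 1
scale = head_dim ** -0.5           # qk_scale default -> 1.0, the kernel's tmp1
assert scale == 1.0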
import torch from torch import nn from functools import partial import torch.utils.data import torch.nn.parallel from torch import optim as optim def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * (x.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x. device) random_tensor.floor_() output = x.div(keep_prob) * random_tensor return output class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class CrossAttention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias) self.q = nn.Linear(dim, dim * 1, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, xq): B, N, C = x.shape _, Nq, _ = xq.shape kv = self.kv(x).reshape(B, N, 2, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q_ = self.q(xq).reshape(B, Nq, 1, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = q_[0], kv[0], kv[1] attn = q @ k.transpose(-2, -1) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, Nq, C) x = self.proj(x) x = self.proj_drop(x) return x class DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
""" def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) class Block_cls(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn .GELU, norm_layer=partial(nn.LayerNorm, eps=1e-06), local_type= 'conv', local_ks=3, local_ratio=0.5, ffn_type='base'): super().__init__() self.norm1 = norm_layer(dim) self.attn = CrossAttention(dim, num_heads=num_heads, qkv_bias= qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path ) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) if ffn_type == 'base': MLP = Mlp else: raise Exception('invalid ffn_type: {}'.format(ffn_type)) self.mlp = MLP(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self, x, xq): xq = xq + self.drop_path(self.attn(x, self.norm1(xq))) xq = xq + self.drop_path(self.mlp(self.norm2(xq))) return xq def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4, 'num_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn from functools import partial import torch.utils.data import torch.nn.parallel from torch import optim as optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-06 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 8 * x2 + 32 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + 8 * x2 + 32 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + 1) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr2 + 2) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr2 + 3) tmp23 = tl.broadcast_to(tmp22, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tmp10 = tmp7 + tmp9 tmp11 = tmp6 + tmp10 tmp12 = tmp5 + tmp11 tmp17 = tmp14 + tmp16 tmp18 = tmp13 + tmp17 tmp19 = tmp12 + tmp18 tmp24 = tmp21 + tmp23 tmp25 = tmp20 + tmp24 tmp26 = tmp19 + tmp25 tmp27 = 4.0 tmp28 = tmp26 / tmp27 tmp29 = tmp5 - tmp28 tmp30 = tmp29 * tmp29 tmp31 = tmp11 - tmp28 tmp32 = tmp31 * tmp31 tmp33 = tmp30 + tmp32 tmp34 = tmp18 - tmp28 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp25 - tmp28 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp39 / tmp27 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp40, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-06 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp6 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_gelu_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = 
tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_out_ptr0 + x2, xmask) tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp4 + tmp7 tl.store(in_out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (8, 4), (4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (16, 4), (4, 1)) assert_size_stride(primals_12, (16,), (1,)) assert_size_stride(primals_13, (4, 16), (16, 1)) assert_size_stride(primals_14, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) get_raw_stream(0) triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((16, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 8), (1, 4), 0), out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0, buf1, primals_1, primals_2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(16, 4)](buf4, buf5, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf4 triton_poi_fused_clone_3[grid(16, 4)](buf2, buf6, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf6, (16, 1, 4), (4, 0, 1), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(256)](buf7, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) buf9 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf7 triton_poi_fused__softmax_5[grid(256)](buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_6[grid(16, 4)](buf2, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf2 buf11 = 
empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_2[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0) del buf11 extern_kernels.mm(reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf13) buf14 = buf1 del buf1 buf15 = buf0 del buf0 triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_3, buf13, primals_8, buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_8[grid(64)](primals_3, buf13, primals_8, buf14, buf15, primals_9, primals_10, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf14 del buf15 del primals_10 buf17 = reinterpret_tensor(buf8, (16, 16), (16, 1), 0) del buf8 extern_kernels.addmm(primals_12, reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf17) del primals_12 buf18 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) triton_poi_fused_gelu_9[grid(256)](buf17, buf18, 256, XBLOCK=256, num_warps=4, num_stages=1) buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf18, (16, 16), (16, 1), 0), reinterpret_tensor(primals_13, (16, 4), (1, 16), 0), out=buf19) buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0) del buf19 triton_poi_fused_add_10[grid(64)](buf20, primals_3, buf13, primals_8, primals_14, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_14 return buf20, primals_3, primals_8, primals_9, reinterpret_tensor(primals_4 , (16, 4), (4, 1), 0), reinterpret_tensor(buf3, (16, 4), (4, 1), 0 ), buf9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0 ), buf13, reinterpret_tensor(buf16, (16, 4), (4, 1), 0 ), buf17, reinterpret_tensor(buf18, (16, 16), (16, 1), 0 ), primals_13, primals_11, primals_7, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 4), 0), primals_6 def drop_path(x, drop_prob: 'float'=0.0, training: 'bool'=False): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * (x.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x. 
device) random_tensor.floor_() output = x.div(keep_prob) * random_tensor return output class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class CrossAttention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias) self.q = nn.Linear(dim, dim * 1, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, xq): B, N, C = x.shape _, Nq, _ = xq.shape kv = self.kv(x).reshape(B, N, 2, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q_ = self.q(xq).reshape(B, Nq, 1, self.num_heads, C // self.num_heads ).permute(2, 0, 3, 1, 4) q, k, v = q_[0], kv[0], kv[1] attn = q @ k.transpose(-2, -1) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, Nq, C) x = self.proj(x) x = self.proj_drop(x) return x class DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). """ def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) class Block_clsNew(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn .GELU, norm_layer=partial(nn.LayerNorm, eps=1e-06), local_type= 'conv', local_ks=3, local_ratio=0.5, ffn_type='base'): super().__init__() self.norm1 = norm_layer(dim) self.attn = CrossAttention(dim, num_heads=num_heads, qkv_bias= qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path ) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) if ffn_type == 'base': MLP = Mlp else: raise Exception('invalid ffn_type: {}'.format(ffn_type)) self.mlp = MLP(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) def forward(self, input_0, input_1): primals_1 = self.norm1.weight primals_2 = self.norm1.bias primals_5 = self.attn.kv.weight primals_6 = self.attn.q.weight primals_7 = self.attn.proj.weight primals_8 = self.attn.proj.bias primals_9 = self.norm2.weight primals_10 = self.norm2.bias primals_11 = self.mlp.fc1.weight primals_12 = self.mlp.fc1.bias primals_13 = self.mlp.fc2.weight primals_14 = self.mlp.fc2.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14]) return output[0]
TencentYoutuResearch/BaseArchitecture-EAT
Block_cls
false
18034
[ "BSD-3-Clause" ]
9
b916738ef9b1314f5fdad780a0839cb4e010a208
https://github.com/TencentYoutuResearch/BaseArchitecture-EAT/tree/b916738ef9b1314f5fdad780a0839cb4e010a208
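The compiled Block_clsNew above fuses a pre-norm cross-attention block (LayerNorm -> CrossAttention -> residual -> LayerNorm -> GELU MLP -> residual). A minimal smoke test, as a sketch: it assumes a CUDA device (the generated call() is CUDA-only) and the [4, 4, 4] input shapes the kernels were traced with; num_heads=4 matches the head_dim=1 layout the bmm reshapes assume.

import torch

if torch.cuda.is_available():
    blk = Block_clsNew(dim=4, num_heads=4).cuda()
    xq = torch.rand(4, 4, 4, device='cuda')  # query/residual stream (input_0, per the call() wiring)
    x = torch.rand(4, 4, 4, device='cuda')   # key/value stream (input_1)
    with torch.no_grad():
        out = blk(xq, x)
    print(out.shape)  # torch.Size([4, 4, 4])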
Classifier
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/zm/czm6acrrgjryz6xi3wza7npycjuiqsdsygpfdo3lbzaquecrmeuj.py # Topologically Sorted Source Nodes: [z], Original ATen: [aten.cat] # Source node to ATen node mapping: # z => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, 
eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [z], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [y], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 return (buf1, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class Classifier(nn.Module): def __init__(self, feature_dim, classes): super(Classifier, self).__init__() self.classifier = nn.Linear(int(feature_dim * 2), classes) def forward(self, di_z, ds_z): z = torch.cat((di_z, ds_z), dim=1) y = self.classifier(z) return y def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'feature_dim': 4, 'classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_4, buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1) del primals_3 del primals_4 return buf1, buf0 class ClassifierNew(nn.Module): def __init__(self, feature_dim, classes): super(ClassifierNew, self).__init__() self.classifier = nn.Linear(int(feature_dim * 2), classes) def forward(self, input_0, input_1): primals_3 = self.classifier.weight primals_4 = self.classifier.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
VinAIResearch/mDSDI
Classifier
false
18035
[ "Apache-2.0" ]
9
8ec49085d8389ab490ec633c3ae4bf66be085366
https://github.com/VinAIResearch/mDSDI/tree/8ec49085d8389ab490ec633c3ae4bf66be085366
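Since this record keeps both the eager Classifier and the Triton-backed ClassifierNew, the two can be checked for numerical parity directly. A sketch, assuming a CUDA device and both class definitions plus get_inputs() from this record in scope:

import torch

if torch.cuda.is_available():
    eager = Classifier(feature_dim=4, classes=4).cuda()
    fused = ClassifierNew(feature_dim=4, classes=4).cuda()
    fused.load_state_dict(eager.state_dict())      # both wrap the same nn.Linear, so keys match
    di_z, ds_z = (t.cuda() for t in get_inputs())  # two [4, 4] tensors
    with torch.no_grad():
        print(torch.allclose(eager(di_z, ds_z), fused(di_z, ds_z), atol=1e-6))  # expected: True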
AdaFRN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/ca/cca4ptql7bziqg4tcfgi2x3bdblajwh6hkqnosoapllrke4gsvwq.py # Topologically Sorted Source Nodes: [pow_1, mean, add, rsqrt, x, add_1, out, max_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.rsqrt, aten.mul, aten.maximum] # Source node to ATen node mapping: # add => add # add_1 => add_1 # max_1 => maximum # mean => mean # out => mul_1 # pow_1 => pow_1 # rsqrt => rsqrt # x => mul # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [2, 3], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %rsqrt), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, %mul), kwargs = {}) # %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%mul_1, %primals_5), kwargs = {}) triton_per_fused_add_maximum_mean_mul_pow_rsqrt_0 = async_compile.triton('triton_per_fused_add_maximum_mean_mul_pow_rsqrt_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': 
{}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_maximum_mean_mul_pow_rsqrt_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_maximum_mean_mul_pow_rsqrt_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex r3 = rindex tmp0 = tl.load(in_ptr0 + (x0 + (8*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (r3 + (16*x2)), xmask, other=0.0) tmp18 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = 16.0 tmp12 = tmp10 / tmp11 tmp13 = 1e-05 tmp14 = tmp12 + tmp13 tmp15 = libdevice.rsqrt(tmp14) tmp16 = tmp5 * tmp15 tmp17 = tmp4 * tmp16 tmp19 = triton_helpers.maximum(tmp17, tmp18) tl.store(out_ptr0 + (x2), tmp4, xmask) tl.debug_barrier() tl.store(in_out_ptr0 + (x2), tmp15, xmask) tl.store(out_ptr1 + (r3 + (16*x2)), tmp19, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 4), (4, 1)) assert_size_stride(primals_3, (8, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_4, reinterpret_tensor(primals_2, (4, 8), (1, 4), 0), out=buf2) del primals_2 buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, mean, add, rsqrt, x, add_1, out, max_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.rsqrt, aten.mul, aten.maximum] stream0 = get_raw_stream(0) triton_per_fused_add_maximum_mean_mul_pow_rsqrt_0.run(buf1, buf2, primals_3, primals_1, primals_5, buf3, buf4, 16, 16, grid=grid(16), stream=stream0) del buf2 del primals_3 return (buf4, primals_1, primals_4, primals_5, buf1, buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from 
torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class AdaFRN(nn.Module): def __init__(self, style_dim, num_features, eps=1e-05): super(AdaFRN, self).__init__() self.tau = nn.Parameter(torch.zeros(1, num_features, 1, 1)) self.fc = nn.Linear(style_dim, num_features * 2) self.eps = eps def forward(self, x, s): x = x * torch.rsqrt(torch.mean(x ** 2, dim=[2, 3], keepdim=True) + self.eps) h = self.fc(s) h = h.view(h.size(0), h.size(1), 1, 1) gamma, _beta = torch.chunk(h, chunks=2, dim=1) out = (1 + gamma) * x return torch.max(out, self.tau) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'style_dim': 4, 'num_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_maximum_mean_mul_pow_rsqrt_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex % 4 x1 = xindex // 4 x2 = xindex r3 = rindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask, eviction_policy='evict_last' ) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (r3 + 16 * x2), xmask, other=0.0) tmp18 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = 16.0 tmp12 = tmp10 / tmp11 tmp13 = 1e-05 tmp14 = tmp12 + tmp13 tmp15 = libdevice.rsqrt(tmp14) tmp16 = tmp5 * tmp15 tmp17 = tmp4 * tmp16 tmp19 = triton_helpers.maximum(tmp17, tmp18) tl.store(out_ptr0 + x2, tmp4, xmask) tl.debug_barrier() tl.store(in_out_ptr0 + x2, tmp15, xmask) tl.store(out_ptr1 + (r3 + 16 * x2), tmp19, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 4), (4, 1)) assert_size_stride(primals_3, (8,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 8), (8, 1), torch.float32) extern_kernels.mm(primals_4, reinterpret_tensor(primals_2, (4, 8), (1, 4), 0), out=buf2) del primals_2 buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf0 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_maximum_mean_mul_pow_rsqrt_0[grid(16)](buf1, buf2, primals_3, primals_1, primals_5, buf3, buf4, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del buf2 del primals_3 return buf4, primals_1, primals_4, primals_5, buf1, buf3 class AdaFRNNew(nn.Module): def __init__(self, style_dim, num_features, eps=1e-05): super(AdaFRNNew, self).__init__() self.tau = nn.Parameter(torch.zeros(1, num_features, 1, 1)) self.fc = nn.Linear(style_dim, num_features * 2) self.eps = eps def forward(self, input_0, input_1): primals_5 = self.tau primals_2 = self.fc.weight primals_3 = self.fc.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
UdonDa/StarGAN-v2-pytorch-nonofficial
AdaFRN
false
18,036
[ "MIT" ]
9
219df6b7fd4bd533686e2093ee914a337914ca9b
https://github.com/UdonDa/StarGAN-v2-pytorch-nonofficial/tree/219df6b7fd4bd533686e2093ee914a337914ca9b
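For reference, the single fused kernel in this record computes Filter Response Normalization modulated by a style code, followed by a learned threshold (the torch.max against tau). Below is an eager transcription of that computation as a sketch; ada_frn_reference is not part of the repo, and shapes follow get_inputs().

import torch

def ada_frn_reference(x, s, fc, tau, eps=1e-05):
    nu2 = (x * x).mean(dim=[2, 3], keepdim=True)    # per-channel mean square (the aten.mean in the kernel)
    x_hat = x * torch.rsqrt(nu2 + eps)              # FRN normalization
    h = fc(s).view(s.size(0), -1, 1, 1)             # style code -> (gamma, beta) pairs
    gamma, _beta = torch.chunk(h, chunks=2, dim=1)  # beta is computed but unused, matching the source
    return torch.max((1 + gamma) * x_hat, tau)      # thresholded linear unit

Passing an AdaFRNNew instance's fc and tau through this function should reproduce its output, since it mirrors AdaFRN.forward line for line.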
LayerNormalization
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/fu/cfugksdllcmoftquqfpuvvwxexhbun72gudxpt6h3bgwcc3ga5zm.py # Topologically Sorted Source Nodes: [sub, add, ln_out, mul, ln_out_1], Original ATen: [aten.sub, aten.add, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # ln_out => div # ln_out_1 => add_1 # mul => mul # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %expand), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_1, 0.001), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand_2, %div), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %expand_3), kwargs = {}) triton_poi_fused_add_div_mul_sub_0 = async_compile.triton('triton_poi_fused_add_div_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': 
True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp2 - tmp10 tmp13 = tmp12 * tmp12 tmp14 = tmp3 - tmp10 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp10 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp7 - tmp10 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp23 = 3.0 tmp24 = tmp22 / tmp23 tmp25 = libdevice.sqrt(tmp24) tmp26 = 0.001 tmp27 = tmp25 + tmp26 tmp28 = tmp11 / tmp27 tmp29 = tmp0 * tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + (x2), tmp31, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, add, ln_out, mul, ln_out_1], Original ATen: [aten.sub, aten.add, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mul_sub_0.run(primals_2, primals_1, primals_3, buf0, 256, grid=grid(256), stream=stream0) del primals_2 del primals_3 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class LayerNormalization(nn.Module): def __init__(self, d_hid, eps=0.001): super(LayerNormalization, self).__init__() self.gamma = nn.Parameter(torch.ones(d_hid), requires_grad=True) self.beta = nn.Parameter(torch.zeros(d_hid), requires_grad=True) self.eps = eps def forward(self, z): mean = z.mean(dim=-1, keepdim=True) std = z.std(dim=-1, keepdim=True) ln_out = (z - mean.expand_as(z)) / (std.expand_as(z) + self.eps) ln_out = self.gamma.expand_as(ln_out) * ln_out + self.beta.expand_as( ln_out) return ln_out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_hid': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp2 - tmp10 tmp13 = tmp12 * tmp12 tmp14 = tmp3 - tmp10 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp10 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp7 - tmp10 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp23 = 3.0 tmp24 = tmp22 / tmp23 tmp25 = libdevice.sqrt(tmp24) tmp26 = 0.001 tmp27 = tmp25 + tmp26 tmp28 = tmp11 / tmp27 tmp29 = tmp0 * tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x2, tmp31, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mul_sub_0[grid(256)](primals_2, primals_1, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf0, primals_1 class LayerNormalizationNew(nn.Module): def __init__(self, d_hid, eps=0.001): super(LayerNormalizationNew, self).__init__() self.gamma = nn.Parameter(torch.ones(d_hid), requires_grad=True) self.beta = nn.Parameter(torch.zeros(d_hid), requires_grad=True) self.eps = eps def forward(self, input_0): primals_2 = self.gamma primals_3 = self.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
VarnithChordia/Multlingual_Punctuation_restoration
LayerNormalization
false
18,037
[ "MIT" ]
8
17c026e8935b9fecae01d446a756926c7733fcd1
https://github.com/VarnithChordia/Multlingual_Punctuation_restoration/tree/17c026e8935b9fecae01d446a756926c7733fcd1
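Note that this hand-rolled LayerNormalization is not numerically identical to nn.LayerNorm: the kernel divides the squared deviations by 3.0 (n-1, i.e. the unbiased std that torch.Tensor.std computes by default) and adds eps to the std rather than to the variance. A sketch illustrating the mismatch, assuming the class above is in scope:

import torch
from torch import nn

x = torch.rand(4, 4, 4, 4)
custom = LayerNormalization(d_hid=4)   # gamma=1, beta=0 at init, like nn.LayerNorm
builtin = nn.LayerNorm(4, eps=0.001)
print(torch.allclose(custom(x), builtin(x), atol=1e-3))  # False: unbiased vs biased variance, eps placement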
DiceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/cs/ccsvnkti3aqfasbx7dbo7nr35tpiu5icq3cchvycu6z3tse4u6dq.py # Topologically Sorted Source Nodes: [mul, intersection, sum_2, sum_3], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # intersection => sum_1 # mul => mul # sum_2 => sum_2 # sum_3 => sum_3 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %view), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_1, [1]), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view, [1]), kwargs = {}) triton_per_fused_mul_sum_0 = async_compile.triton('triton_per_fused_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tl.store(out_ptr0 + (x0), tmp6, xmask) tl.store(out_ptr1 + (x0), tmp10, xmask) tl.store(out_ptr2 + (x0), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/v3/cv32onss65x73xaxjugvotx5rbckqzebdput6qjdr7rsrvpycxbm.py # Topologically Sorted Source Nodes: [mul_1, add_1, union, add_2, loss, log, mean, neg], Original ATen: [aten.mul, aten.add, aten.div, aten.log, aten.mean, aten.neg] # Source node to ATen node mapping: # add_1 => add_1 # add_2 => add_2 # log => log # loss => div # mean => mean # mul_1 => mul_1 # neg => neg # union => add # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, %add_2), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%log,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean,), kwargs = {}) triton_per_fused_add_div_log_mean_mul_neg_1 = async_compile.triton('triton_per_fused_add_div_log_mean_mul_neg_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_log_mean_mul_neg_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 
'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_log_mean_mul_neg_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp5 = tl.load(in_ptr1 + (r0), None) tmp6 = tl.load(in_ptr2 + (r0), None) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp7 + tmp3 tmp9 = tmp4 / tmp8 tmp10 = tl_math.log(tmp9) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp14 = 4.0 tmp15 = tmp13 / tmp14 tmp16 = -tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp16, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, ), (1, ), torch.float32) buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) buf2 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mul, intersection, sum_2, sum_3], Original ATen: [aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_per_fused_mul_sum_0.run(arg0_1, arg1_1, buf0, buf1, buf2, 4, 64, grid=grid(4), stream=stream0) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [mul_1, add_1, union, add_2, loss, log, mean, neg], Original ATen: [aten.mul, aten.add, aten.div, aten.log, aten.mean, aten.neg] triton_per_fused_add_div_log_mean_mul_neg_1.run(buf4, buf0, buf1, buf2, 1, 4, grid=grid(1), stream=stream0) del buf0 del buf1 del buf2 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class DiceLoss(nn.Module): def __init__(self, eps: 'int'=1) ->None: super().__init__() self.eps = eps def forward(self, output: 'torch.Tensor', target: 'torch.Tensor' ) ->torch.Tensor: batch_size = output.shape[0] dice_target = target.reshape(batch_size, -1) dice_output = output.reshape(batch_size, -1) intersection = torch.sum(dice_output * dice_target, dim=1) union = torch.sum(dice_output, dim=1) + torch.sum(dice_target, dim=1) loss = (2 * intersection + self.eps) / (union + self.eps) return -torch.log(loss).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tl.store(out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr1 + x0, tmp10, xmask) tl.store(out_ptr2 + x0, tmp14, xmask) @triton.jit def triton_per_fused_add_div_log_mean_mul_neg_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr1 + r0, None) tmp6 = tl.load(in_ptr2 + r0, None) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp7 + tmp3 tmp9 = tmp4 / tmp8 tmp10 = tl_math.log(tmp9) tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp14 = 4.0 tmp15 = tmp13 / tmp14 tmp16 = -tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4,), (1,), torch.float32) buf1 = empty_strided_cuda((4,), (1,), torch.float32) buf2 = empty_strided_cuda((4,), (1,), torch.float32) get_raw_stream(0) triton_per_fused_mul_sum_0[grid(4)](arg0_1, arg1_1, buf0, buf1, buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused_add_div_log_mean_mul_neg_1[grid(1)](buf4, buf0, buf1, buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 del buf2 return buf4, class DiceLossNew(nn.Module): def __init__(self, eps: 'int'=1) ->None: super().__init__() self.eps = eps def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
TylerYep/ml-toolkit
DiceLoss
false
18038
[ "MIT" ]
7
095bdce961133acc720f90b6d1bbb0a7becbfc9f
https://github.com/TylerYep/ml-toolkit/tree/095bdce961133acc720f90b6d1bbb0a7becbfc9f
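A quick sanity check of the log-dice formula above: with identical masks, intersection = |X| and union = 2|X|, so (2*|X| + eps) / (2*|X| + eps) = 1 and -log(1).mean() is exactly 0. A sketch assuming the DiceLoss class from this record:

import torch

mask = (torch.rand(4, 4, 4, 4) > 0.5).float()
print(DiceLoss(eps=1)(mask, mask))  # tensor(0.)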
GAP1d
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/4d/c4dwaxcadda6osi7swypsmhlolcgut64uav2vxfbfdkzciwpyarm.py # Topologically Sorted Source Nodes: [adaptive_avg_pool1d], Original ATen: [aten.mean] # Source node to ATen node mapping: # adaptive_avg_pool1d => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%unsqueeze, [-1, -2], True), kwargs = {}) triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, 
eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [adaptive_avg_pool1d], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_poi_fused_mean_0.run(arg0_1, buf0, 4, grid=grid(4), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 1), (1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn import torch.nn.functional class Flatten(nn.Module): def forward(self, x): return x.view(x.size(0), -1) class GAP1d(nn.Module): """Global Adaptive Pooling + Flatten """ def __init__(self, output_size=1): super(GAP1d, self).__init__() self.gap = nn.AdaptiveAvgPool1d(output_size) self.flatten = Flatten() def forward(self, x): return self.flatten(self.gap(x)) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.nn.functional assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_0[grid(4)](arg0_1, buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 1), (1, 1), 0), class Flatten(nn.Module): def forward(self, x): return x.view(x.size(0), -1) class GAP1dNew(nn.Module): """Global Adaptive Pooling + Flatten """ def __init__(self, output_size=1): super(GAP1dNew, self).__init__() self.gap = nn.AdaptiveAvgPool1d(output_size) self.flatten = Flatten() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
VincentSch4rf/torchtime
GAP1d
false
18,039
[ "Apache-2.0" ]
4
bebd006cd67b31c342e0658285c9771c27411df0
https://github.com/VincentSch4rf/torchtime/tree/bebd006cd67b31c342e0658285c9771c27411df0
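Editor's note (a sketch, not part of the dataset row above): the fused kernel triton_poi_fused_mean_0 is just the last-dimension mean that GAP1d's AdaptiveAvgPool1d(1) + Flatten reduce to for the (4, 4) sample input. A minimal eager-mode check of that equivalence, with illustrative variable names (x, eager, manual) that do not appear in the row:

import torch
from torch import nn

# The four loads at offsets 4*x0 .. 3 + 4*x0, summed and divided by 4.0 in the
# kernel, are exactly a mean over the last dimension of a (4, 4) tensor.
x = torch.rand(4, 4)
gap = nn.AdaptiveAvgPool1d(1)
eager = gap(x).view(x.size(0), -1)       # GAP1d.forward: pool, then flatten -> (4, 1)
manual = x.mean(dim=-1, keepdim=True)    # what triton_poi_fused_mean_0 computes
assert torch.allclose(eager, manual)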
LRN
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/fp/cfpylbinohoan7yh5fcfp46fvbiwpo63lxt42umo3xnsuu3aeumz.py # Topologically Sorted Source Nodes: [mul, add, div_2, x], Original ATen: [aten.mul, aten.add, aten.pow, aten.div] # Source node to ATen node mapping: # add => add # div_2 => pow_2 # mul => mul # x => div # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, 1.0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1.0), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.75), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %pow_2), kwargs = {}) triton_poi_fused_add_div_mul_pow_0 = async_compile.triton('triton_poi_fused_add_div_mul_pow_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_add_div_mul_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0 * tmp0 tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 + tmp2 tmp6 = 0.75 tmp7 = libdevice.pow(tmp5, tmp6) tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, add, div_2, x], Original ATen: [aten.mul, aten.add, aten.pow, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mul_pow_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class LRN(nn.Module): def __init__(self, local_size=1, alpha=1.0, beta=0.75, ACROSS_CHANNELS=True ): super(LRN, self).__init__() self.ACROSS_CHANNELS = ACROSS_CHANNELS if ACROSS_CHANNELS: self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1), stride=1, padding=(int((local_size - 1.0) / 2), 0, 0)) else: self.average = nn.AvgPool2d(kernel_size=local_size, stride=1, padding=int((local_size - 1.0) / 2)) self.alpha = alpha self.beta = beta def forward(self, x): if self.ACROSS_CHANNELS: div = x.pow(2).unsqueeze(1) div = self.average(div).squeeze(1) div = div.mul(self.alpha).add(1.0).pow(self.beta) else: div = x.pow(2) div = self.average(div) div = div.mul(self.alpha).add(1.0).pow(self.beta) x = x.div(div) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mul_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 * tmp0 tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 + tmp2 tmp6 = 0.75 tmp7 = libdevice.pow(tmp5, tmp6) tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mul_pow_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class LRNNew(nn.Module): def __init__(self, local_size=1, alpha=1.0, beta=0.75, ACROSS_CHANNELS=True ): super(LRNNew, self).__init__() self.ACROSS_CHANNELS = ACROSS_CHANNELS if ACROSS_CHANNELS: self.average = nn.AvgPool3d(kernel_size=(local_size, 1, 1), stride=1, padding=(int((local_size - 1.0) / 2), 0, 0)) else: self.average = nn.AvgPool2d(kernel_size=local_size, stride=1, padding=int((local_size - 1.0) / 2)) self.alpha = alpha self.beta = beta def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
VisionLearningGroup/CDS
LRN
false
18,040
[ "MIT" ]
7
5b3644c286f19f76acdc03c6f6021a6f6e4ec4fc
https://github.com/VisionLearningGroup/CDS/tree/5b3644c286f19f76acdc03c6f6021a6f6e4ec4fc
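Editor's note (a sketch, not dataset content): with the module defaults local_size=1, alpha=1.0, beta=0.75, the AvgPool3d window is (1, 1, 1), i.e. an identity, so LRN.forward collapses to a closed form. That is the chain the fused kernel evaluates: tmp0*tmp0, two multiplies by 1.0, +1.0, libdevice.pow(., 0.75), then tmp0 divided by the result. Variable names below are illustrative:

import torch

x = torch.rand(4, 4, 4, 4)
alpha, beta = 1.0, 0.75
# div = (x**2 * alpha + 1) ** beta; out = x / div -- matching the fused kernel.
out = x / x.pow(2).mul(alpha).add(1.0).pow(beta)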
BinaryFocalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/yg/cygw7bdzflz4mj5iymgjt4uc4zz5mbwb4aufh3yqxedyqkssz2v3.py # Topologically Sorted Source Nodes: [neg, p, sub, pow_1, mul, add, log, loss_pos, sub_1, neg_1, pow_2, mul_2, sub_2, add_1, log_1, loss_neg, loss, mean], Original ATen: [aten.neg, aten.sigmoid, aten.rsub, aten.pow, aten.mul, aten.add, aten.log, aten.mean] # Source node to ATen node mapping: # add => add # add_1 => add_1 # log => log # log_1 => log_1 # loss => add_2 # loss_neg => mul_3 # loss_pos => mul_1 # mean => mean # mul => mul # mul_2 => mul_2 # neg => neg # neg_1 => neg_1 # p => sigmoid # pow_1 => pow_1 # pow_2 => pow_2 # sub => sub # sub_1 => sub_1 # sub_2 => sub_2 # Graph fragment: # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {}) # %sigmoid : [num_users=4] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2.0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %pow_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sigmoid, 1e-09), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %log), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %arg1_1), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sub_1,), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sigmoid, 2.0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg_1, %pow_2), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_2, 1e-09), kwargs = {}) # %log_1 : 
[num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %log_1), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_3), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add_2,), kwargs = {}) triton_per_fused_add_log_mean_mul_neg_pow_rsub_sigmoid_0 = async_compile.triton('triton_per_fused_add_log_mean_mul_neg_pow_rsub_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_log_mean_mul_neg_pow_rsub_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_log_mean_mul_neg_pow_rsub_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp2 = tl.load(in_ptr1 + (r0), None) tmp1 = -tmp0 tmp3 = tl.sigmoid(tmp2) tmp4 = 1.0 tmp5 = tmp4 - tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp1 * tmp6 tmp8 = 1e-09 tmp9 = tmp3 + tmp8 tmp10 = tl_math.log(tmp9) tmp11 = tmp7 * tmp10 tmp12 = tmp4 - tmp0 tmp13 = -tmp12 tmp14 = tmp3 * tmp3 tmp15 = tmp13 * tmp14 tmp16 = tmp5 + tmp8 tmp17 = tl_math.log(tmp16) tmp18 = tmp15 * tmp17 tmp19 = tmp11 + tmp18 tmp20 = tl.broadcast_to(tmp19, [RBLOCK]) tmp22 = triton_helpers.promote_to_tensor(tl.sum(tmp20, 0)) tmp23 = 256.0 tmp24 = tmp22 / tmp23 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp24, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [neg, p, sub, pow_1, mul, add, 
log, loss_pos, sub_1, neg_1, pow_2, mul_2, sub_2, add_1, log_1, loss_neg, loss, mean], Original ATen: [aten.neg, aten.sigmoid, aten.rsub, aten.pow, aten.mul, aten.add, aten.log, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_add_log_mean_mul_neg_pow_rsub_sigmoid_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn def binary_focal_loss(pred, target, gamma=2.0, alpha=-1, reduction='mean'): p = torch.sigmoid(pred) loss_pos = -target * (1.0 - p) ** gamma * torch.log(p + 1e-09) loss_neg = -(1.0 - target) * p ** gamma * torch.log(1.0 - p + 1e-09) if alpha >= 0.0 and alpha <= 1.0: loss_pos = loss_pos * alpha loss_neg = loss_neg * (1.0 - alpha) loss = loss_pos + loss_neg if reduction == 'mean': return loss.mean() elif reduction == 'sum': return loss.sum() elif reduction == 'none': return loss else: raise RuntimeError class BinaryFocalLoss(nn.Module): def __init__(self, gamma=2.0, alpha=-1): super(BinaryFocalLoss, self).__init__() self.gamma, self.alpha = gamma, alpha def forward(self, pred, target, reduction='mean'): return binary_focal_loss(pred, target, self.gamma, self.alpha, reduction) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_log_mean_mul_neg_pow_rsub_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp2 = tl.load(in_ptr1 + r0, None) tmp1 = -tmp0 tmp3 = tl.sigmoid(tmp2) tmp4 = 1.0 tmp5 = tmp4 - tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp1 * tmp6 tmp8 = 1e-09 tmp9 = tmp3 + tmp8 tmp10 = tl_math.log(tmp9) tmp11 = tmp7 * tmp10 tmp12 = tmp4 - tmp0 tmp13 = -tmp12 tmp14 = tmp3 * tmp3 tmp15 = tmp13 * tmp14 tmp16 = tmp5 + tmp8 tmp17 = tl_math.log(tmp16) tmp18 = tmp15 * tmp17 tmp19 = tmp11 + tmp18 tmp20 = tl.broadcast_to(tmp19, [RBLOCK]) tmp22 = triton_helpers.promote_to_tensor(tl.sum(tmp20, 0)) tmp23 = 256.0 tmp24 = tmp22 / tmp23 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp24, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_log_mean_mul_neg_pow_rsub_sigmoid_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, def binary_focal_loss(pred, target, gamma=2.0, alpha=-1, reduction='mean'): p = torch.sigmoid(pred) loss_pos = -target * (1.0 - p) ** gamma * torch.log(p + 1e-09) loss_neg = -(1.0 - target) * p ** gamma * torch.log(1.0 - p + 1e-09) if alpha >= 0.0 and alpha <= 1.0: loss_pos = loss_pos * alpha loss_neg = loss_neg * (1.0 - alpha) loss = loss_pos + loss_neg if reduction == 'mean': return loss.mean() elif reduction == 'sum': return loss.sum() elif reduction == 'none': return loss else: raise RuntimeError class BinaryFocalLossNew(nn.Module): def __init__(self, gamma=2.0, alpha=-1): super(BinaryFocalLossNew, self).__init__() self.gamma, self.alpha = gamma, alpha def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
VisualComputingInstitute/Person_MinkUNet
BinaryFocalLoss
false
18,041
[ "MIT" ]
4
fa39764245a022740c0a3d8c85026532fff93e74
https://github.com/VisualComputingInstitute/Person_MinkUNet/tree/fa39764245a022740c0a3d8c85026532fff93e74
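Editor's note (a sketch, not dataset content): with the default alpha=-1 the alpha-weighting branch of binary_focal_loss is dead code, which is why the persistent-reduction kernel only evaluates loss_pos + loss_neg and the final mean (tl.sum over all 256 elements, then tmp22 / 256.0). The eager equivalent for the default gamma=2.0, with illustrative names:

import torch

pred = torch.rand(4, 4, 4, 4)
target = torch.rand(4, 4, 4, 4)
p = torch.sigmoid(pred)
# loss_pos + loss_neg, then the mean the kernel computes as sum / 256.0.
loss = (-target * (1.0 - p) ** 2 * torch.log(p + 1e-09)
        - (1.0 - target) * p ** 2 * torch.log(1.0 - p + 1e-09)).mean()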
LayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/jf/cjfodn6ytrtuoaceb5njpdwxiajd3oeiid7qxygpb2adoxjoyvvf.py # Topologically Sorted Source Nodes: [mean, std, sub, add, output, output_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.add, aten.div] # Source node to ATen node mapping: # add => add # mean => mean # output => div # output_1 => add_1 # std => sqrt, var # sub => sub # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1], True), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [-1]), kwargs = {correction: 1.0, keepdim: True}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-06), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_2), kwargs = {}) triton_poi_fused_add_div_mean_std_sub_0 = async_compile.triton('triton_poi_fused_add_div_mean_std_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_std_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 
'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_std_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = 3.0 tmp23 = tmp21 / tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = 1e-06 tmp26 = tmp24 + tmp25 tmp27 = tmp10 / tmp26 tmp29 = tmp27 + tmp28 tl.store(out_ptr0 + (x2), tmp29, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, std, sub, add, output, output_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.add, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mean_std_sub_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class LayerNorm(nn.Module): """ Simple 1D LayerNorm. """ def __init__(self, features, center=True, scale=False, eps=1e-06): super().__init__() self.center = center self.scale = scale self.eps = eps if self.scale: self.scale_param = nn.Parameter(torch.ones(features)) else: self.scale_param = None if self.center: self.center_param = nn.Parameter(torch.zeros(features)) else: self.center_param = None def forward(self, x): mean = x.mean(-1, keepdim=True) std = x.std(-1, keepdim=True) output = (x - mean) / (std + self.eps) if self.scale: output = output * self.scale_param if self.center: output = output + self.center_param return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'features': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mean_std_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = 3.0 tmp23 = tmp21 / tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = 1e-06 tmp26 = tmp24 + tmp25 tmp27 = tmp10 / tmp26 tmp29 = tmp27 + tmp28 tl.store(out_ptr0 + x2, tmp29, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mean_std_sub_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf0, class LayerNormNew(nn.Module): """ Simple 1D LayerNorm. """ def __init__(self, features, center=True, scale=False, eps=1e-06): super().__init__() self.center = center self.scale = scale self.eps = eps if self.scale: self.scale_param = nn.Parameter(torch.ones(features)) else: self.scale_param = None if self.center: self.center_param = nn.Parameter(torch.zeros(features)) else: self.center_param = None def forward(self, input_0): primals_2 = self.center_param primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
UT-Austin-RPL/maple
LayerNorm
false
18,042
[ "MIT" ]
9
aef9fe9869945df5bbd1b02fd40813aac135cf5a
https://github.com/UT-Austin-RPL/maple/tree/aef9fe9869945df5bbd1b02fd40813aac135cf5a
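Editor's note (a sketch, not dataset content): the division by 3.0 in the kernel (tmp23 = tmp21 / 3.0 before libdevice.sqrt) is the Bessel-corrected variance, N - 1 = 3 for the 4-element last dimension, because x.std() defaults to correction=1; this differs from nn.LayerNorm, which normalizes with the biased variance. An eager restatement of the same recipe, with illustrative names:

import torch

x = torch.rand(4, 4, 4, 4)
center = torch.zeros(4)  # center_param starts as zeros(features); scale=False by default
out = (x - x.mean(-1, keepdim=True)) / (x.std(-1, keepdim=True) + 1e-06) + center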
SAM_Module
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/vu/cvuk26doyygvttg2zp55bryzzvez7ugdrqavksjocjhjxdz7aa6d.py # Topologically Sorted Source Nodes: [avg], Original ATen: [aten.mean] # Source node to ATen node mapping: # avg => mean # Graph fragment: # %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1], True), kwargs = {}) triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) 
tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/id/cidq7yz75eih55wcintoo4y4wimds2ct66mggi4tcj5vxeakq36c.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/tu/ctuxpzmkuoetuxrtzq5p4ncb5fkrq23s7htnqpevnc3iy2xatyp7.py # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.sigmoid, aten.mul] # Source node to ATen node mapping: # x_1 => sigmoid # x_2 => mul # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {}) triton_poi_fused_mul_sigmoid_2 = async_compile.triton('triton_poi_fused_mul_sigmoid_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, 
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + (x3), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_3, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [avg], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_poi_fused_mean_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0) del primals_3 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.sigmoid, aten.mul] triton_poi_fused_mul_sigmoid_2.run(primals_1, buf2, buf3, 256, grid=grid(256), stream=stream0) return (buf3, primals_1, primals_2, buf0, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from torchvision.transforms import * class SAM_Module(nn.Module): """ Position attention module""" def __init__(self, channels): super(SAM_Module, self).__init__() self.relu = nn.ReLU(inplace=True) self.conv_after_concat = nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=1) self.sigmoid_spatial = nn.Sigmoid() def forward(self, x): """ inputs : x : input feature maps( B X C X H X W) returns : out : attention value + input feature attention: B X (HxW) X (HxW) """ module_input = x avg = torch.mean(x, 1, True) x = self.conv_after_concat(avg) x = self.sigmoid_spatial(x) x = module_input * x return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torchvision.transforms import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x3, tmp3, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_2[grid(256)](primals_1, buf2, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf3, primals_1, primals_2, buf0, buf2 class SAM_ModuleNew(nn.Module): """ Position attention module""" def __init__(self, channels): super(SAM_ModuleNew, self).__init__() self.relu = nn.ReLU(inplace=True) self.conv_after_concat = nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=1) self.sigmoid_spatial = nn.Sigmoid() def forward(self, input_0): primals_2 = self.conv_after_concat.weight primals_3 = self.conv_after_concat.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Vill-Lab/IGOAS
SAM_Module
false
18,043
[ "MIT" ]
8
42ca1d45e441f993c95b5e8f33c9f97ea3b916f3
https://github.com/Vill-Lab/IGOAS/tree/42ca1d45e441f993c95b5e8f33c9f97ea3b916f3
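Editor's note (a sketch, not dataset content): call() stitches three stages together -- triton_poi_fused_mean_0 computes the channel mean torch.mean(x, 1, True), extern_kernels.convolution runs the 3x3 conv (with the bias folded in afterwards by triton_poi_fused_convolution_1), and triton_poi_fused_mul_sigmoid_2 applies the sigmoid gate to the input. The eager pipeline it implements, with illustrative names:

import torch
from torch import nn

x = torch.rand(4, 4, 4, 4)
conv = nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=1)
# mean over channels -> 3x3 conv -> sigmoid -> elementwise gate on the input.
out = x * torch.sigmoid(conv(torch.mean(x, 1, True)))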
Normalize
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/yg/cygftvha4smzweiqrq2ltdt3ehkash6qwhdujk77aebqofz4rpbb.py # Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div] # Source node to ATen node mapping: # normalize => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %expand), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 
(4*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp5 * tmp5 tmp7 = tmp3 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp7 + tmp10 tmp13 = tmp12 * tmp12 tmp14 = tmp13 * tmp13 tmp15 = tmp11 + tmp14 tmp16 = 0.25 tmp17 = libdevice.pow(tmp15, tmp16) tmp18 = 0.0 tmp19 = triton_helpers.maximum(tmp17, tmp18) tmp20 = tmp0 / tmp19 tl.store(out_ptr0 + (x2), tmp20, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(arg0_1, buf0, 1024, grid=grid(1024), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import Tensor from typing import Tuple import torch.nn.functional as F import torch.nn.functional class Normalize(torch.nn.Module): """Normalize a tensor time series with mean and standard deviation. Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],..,std[n])`` for ``n`` channels, this transform will normalize each channel of the input ``torch.*Tensor`` i.e., ``output[channel] = (input[channel] - mean[channel]) / std[channel]`` .. note:: This transform acts out of place, i.e., it does not mutate the input tensor. Args: mean (tuple): Sequence of means for each channel. std (tuple): Sequence of standard deviations for each channel. inplace(bool,optional): Bool to make this operation in-place. """ def __init__(self, mean: 'Tuple[float]', std: 'Tuple[float]', inplace: 'bool'=False): super(Normalize, self).__init__() self.mean = mean self.std = std self.inplace = inplace def forward(self, tensor: 'Tensor') ->Tensor: """ Args: tensor (Tensor): Tensor time series to be normalized. Returns: Tensor: Normalized Tensor series. """ return F.normalize(tensor, self.mean, self.std, self.inplace) def __repr__(self): return self.__class__.__name__ + '(mean={0}, std={1})'.format(self. mean, self.std) def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {'mean': 4, 'std': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from typing import Tuple import torch.nn.functional assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp2 = tmp1 * tmp1 tmp3 = tmp2 * tmp2 tmp5 = tmp4 * tmp4 tmp6 = tmp5 * tmp5 tmp7 = tmp3 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp7 + tmp10 tmp13 = tmp12 * tmp12 tmp14 = tmp13 * tmp13 tmp15 = tmp11 + tmp14 tmp16 = 0.25 tmp17 = libdevice.pow(tmp15, tmp16) tmp18 = 0.0 tmp19 = triton_helpers.maximum(tmp17, tmp18) tmp20 = tmp0 / tmp19 tl.store(out_ptr0 + x2, tmp20, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class NormalizeNew(torch.nn.Module): """Normalize a tensor time series with mean and standard deviation. Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],..,std[n])`` for ``n`` channels, this transform will normalize each channel of the input ``torch.*Tensor`` i.e., ``output[channel] = (input[channel] - mean[channel]) / std[channel]`` .. note:: This transform acts out of place, i.e., it does not mutate the input tensor. Args: mean (tuple): Sequence of means for each channel. std (tuple): Sequence of standard deviations for each channel. inplace(bool,optional): Bool to make this operation in-place. """ def __init__(self, mean: 'Tuple[float]', std: 'Tuple[float]', inplace: 'bool'=False): super(NormalizeNew, self).__init__() self.mean = mean self.std = std self.inplace = inplace def __repr__(self): return self.__class__.__name__ + '(mean={0}, std={1})'.format(self. mean, self.std) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
VincentSch4rf/torchtime
Normalize
false
18,044
[ "Apache-2.0" ]
4
bebd006cd67b31c342e0658285c9771c27411df0
https://github.com/VincentSch4rf/torchtime/tree/bebd006cd67b31c342e0658285c9771c27411df0
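Editor's note (a sketch, not dataset content): Normalize.forward passes (tensor, self.mean, self.std, self.inplace) positionally into F.normalize, so with the init inputs mean=4, std=4 those values land in the p= and dim= slots and inplace=False lands in eps=. That is why the kernel raises each element to the 4th power, takes libdevice.pow(sum, 0.25) -- an L4 norm over the last dimension -- and clamps with triton_helpers.maximum(norm, 0.0), since eps=False coerces to 0.0. An eager restatement, with illustrative names:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4, 4)
out = F.normalize(x, p=4, dim=4, eps=0.0)
# Same L4 normalization written out by hand, mirroring the fused kernel.
manual = x / x.pow(4).sum(-1, keepdim=True).pow(0.25).clamp_min(0.0)
assert torch.allclose(out, manual)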
LinearAverage
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/mx/cmxbfjb72nytzsznd77iesf5kerab7o43pm2jcylpdcy375xdpon.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.div] # Source node to ATen node mapping: # out => div # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mm, 0.05), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = 20.0 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 
4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm] extern_kernels.mm(arg1_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(buf1, 16, grid=grid(16), stream=stream0) return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data class LinearAverage(nn.Module): def __init__(self, inputSize, outputSize, T=0.05, momentum=0.5): super(LinearAverage, self).__init__() self.nLem = outputSize self.momentum = momentum self.register_buffer('params', torch.tensor([T, momentum])) self.register_buffer('memory', torch.zeros(outputSize, inputSize)) self.flag = 0 self.T = T self.memory = self.memory self.memory_first = True def forward(self, x, y): out = torch.mm(x, self.memory.t()) / self.T return out def update_weight(self, features, index): weight_pos = self.memory.index_select(0, index.data.view(-1) ).resize_as_(features) weight_pos.mul_(self.momentum) weight_pos.add_(torch.mul(features.data, 1 - self.momentum)) w_norm = weight_pos.pow(2).sum(1, keepdim=True).pow(0.5) updated_weight = weight_pos.div(w_norm) self.memory.index_copy_(0, index, updated_weight) self.memory = F.normalize(self.memory) def set_weight(self, features, index): self.memory.index_select(0, index.data.view(-1)).resize_as_(features) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'inputSize': 4, 'outputSize': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.functional as F import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 20.0 tmp2 = tmp0 * tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(arg1_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_div_0[grid(16)](buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf1, class LinearAverageNew(nn.Module): def __init__(self, inputSize, outputSize, T=0.05, momentum=0.5): super(LinearAverageNew, self).__init__() self.nLem = outputSize self.momentum = momentum self.register_buffer('params', torch.tensor([T, momentum])) self.register_buffer('memory', torch.zeros(outputSize, inputSize)) self.flag = 0 self.T = T self.memory = self.memory self.memory_first = True def update_weight(self, features, index): weight_pos = self.memory.index_select(0, index.data.view(-1) ).resize_as_(features) weight_pos.mul_(self.momentum) weight_pos.add_(torch.mul(features.data, 1 - self.momentum)) w_norm = weight_pos.pow(2).sum(1, keepdim=True).pow(0.5) updated_weight = weight_pos.div(w_norm) self.memory.index_copy_(0, index, updated_weight) self.memory = F.normalize(self.memory) def set_weight(self, features, index): self.memory.index_select(0, index.data.view(-1)).resize_as_(features) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
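A minimal smoke-test sketch for the pair above, not part of the original record: it assumes a CUDA-enabled PyTorch build with Triton, and that LinearAverageNew, get_inputs, and get_init_inputs from the listings are in scope; all variable names here are illustrative.

import torch

# Instantiate the compiled wrapper from the record's own init helpers.
init_args, init_kwargs = get_init_inputs()
model = LinearAverageNew(*init_args, **init_kwargs).cuda()
# Run it on the record's sample inputs; the mm + scale-by-1/T kernel needs CUDA.
x, y = [t.cuda() for t in get_inputs()]
with torch.no_grad():
    out = model(x, y)
print(out.shape)  # expected: torch.Size([4, 4]) for these 4x4 sample inputs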
VisionLearningGroup/CDS
LinearAverage
false
18,045
[ "MIT" ]
7
5b3644c286f19f76acdc03c6f6021a6f6e4ec4fc
https://github.com/VisionLearningGroup/CDS/tree/5b3644c286f19f76acdc03c6f6021a6f6e4ec4fc
L2Norm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/4x/c4xuscq35mxnzg5sltp5ycgzhepfbdkh4jg7xnxvvfceejxdny2y.py # Topologically Sorted Source Nodes: [x, out], Original ATen: [aten.div, aten.mul] # Source node to ATen node mapping: # out => mul # x => div # Graph fragment: # %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand_1, %div), kwargs = {}) # %copy_ : [num_users=0] = call_function[target=torch.ops.aten.copy_.default](args = (%primals_1, %div), kwargs = {}) triton_poi_fused_div_mul_0 = async_compile.triton('triton_poi_fused_div_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_0', 'mutated_arg_names': ['in_ptr0', 'out_ptr2'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x5 = xindex x0 = xindex % 16 x1 = (xindex // 16) % 4 x3 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x5), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (x3), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-10 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tmp17 = tmp16 * tmp15 tl.store(out_ptr0 + (x5), tmp15, xmask) tl.store(out_ptr1 + (x5), tmp17, xmask) tl.store(out_ptr2 + (x5), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x, out], Original ATen: [aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_div_mul_0.run(primals_1, primals_2, buf0, buf1, primals_1, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 return (buf1, buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data import torch.nn.init as init class L2Norm(nn.Module): def __init__(self, n_channels, scale): super(L2Norm, self).__init__() self.n_channels = n_channels self.gamma = scale or None self.eps = 1e-10 self.weight = nn.Parameter(torch.Tensor(self.n_channels)) self.reset_parameters() def reset_parameters(self): init.constant_(self.weight, self.gamma) def forward(self, x): norm = x.pow(2).sum(1).sqrt() + self.eps x /= norm.expand_as(x) out = self.weight.unsqueeze(0).expand_as(x) * x return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_channels': 4, 'scale': 1.0}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data import torch.nn.init as init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x5 = xindex x0 = xindex % 16 x1 = xindex // 16 % 4 x3 = xindex % 4 tmp0 = tl.load(in_ptr0 + x5, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask, eviction_policy= 'evict_last') tmp16 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-10 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tmp17 = tmp16 * tmp15 tl.store(out_ptr0 + x5, tmp15, xmask) tl.store(out_ptr1 + x5, tmp17, xmask) tl.store(out_ptr2 + x5, tmp15, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_mul_0[grid(256)](primals_1, primals_2, buf0, buf1, primals_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf1, buf0 class L2NormNew(nn.Module): def __init__(self, n_channels, scale): super(L2NormNew, self).__init__() self.n_channels = n_channels self.gamma = scale or None self.eps = 1e-10 self.weight = nn.Parameter(torch.Tensor(self.n_channels)) self.reset_parameters() def reset_parameters(self): init.constant_(self.weight, self.gamma) def forward(self, input_0): primals_2 = self.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
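The same hedged smoke test applies to the L2Norm pair, under the same assumptions (CUDA, Triton, and the listings above in scope). One caveat worth a comment: the fused kernel's third output pointer aliases the input buffer, so the input tensor is overwritten in place.

import torch

init_args, init_kwargs = get_init_inputs()
model = L2NormNew(*init_args, **init_kwargs).cuda()
x = get_inputs()[0].cuda()
with torch.no_grad():
    out = model(x)  # note: x itself is overwritten by the in-place store
print(out.shape)  # expected: torch.Size([4, 4, 4, 4])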
VisionLearningGroup/CDS
L2Norm
false
18,046
[ "MIT" ]
7
5b3644c286f19f76acdc03c6f6021a6f6e4ec4fc
https://github.com/VisionLearningGroup/CDS/tree/5b3644c286f19f76acdc03c6f6021a6f6e4ec4fc
UNetUpsamplingBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/6w/c6wtmdwxjo7wezkht7wgzzxxrhlxzpar6hx7dzg655ig7bbf4zmm.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten._unsafe_index] # Source node to ATen node mapping: # x => _unsafe_index # Graph fragment: # %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {}) triton_poi_fused__unsafe_index_0 = async_compile.triton('triton_poi_fused__unsafe_index_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 8) % 8 x0 = xindex % 8 x2 = (xindex // 64) x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = 
tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + (4*tmp4) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x4), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/al/calk7bj7is5a6yydsir3cdgnezegqhetwwes3o6f52arepo4cf5f.py # Topologically Sorted Source Nodes: [x_1, x_3], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit] # Source node to ATen node mapping: # x_1 => convolution # x_3 => add_4, add_5, mul_4, mul_5, repeat, rsqrt, sub, var_mean # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_4, [4]), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [0, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %getitem_1), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %unsqueeze_2), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %unsqueeze_4), kwargs = {}) triton_per_fused__native_batch_norm_legit_convolution_repeat_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_repeat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_repeat_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def 
triton_per_fused__native_batch_norm_legit_convolution_repeat_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex r3 = rindex x1 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_out_ptr0 + (r3 + (64*x0)), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (x0 % 4), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.full([1, 1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp11 = tl.where(xmask, tmp9, 0) tmp12 = tl.sum(tmp11, 1)[:, None] tmp13 = tl.full([XBLOCK, 1], 64, tl.int32) tmp14 = tmp13.to(tl.float32) tmp15 = tmp12 / tmp14 tmp16 = tmp6 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.where(xmask, tmp18, 0) tmp21 = tl.sum(tmp20, 1)[:, None] tmp22 = tmp5 - tmp15 tmp23 = 64.0 tmp24 = tmp21 / tmp23 tmp25 = 1e-05 tmp26 = tmp24 + tmp25 tmp27 = libdevice.rsqrt(tmp26) tmp28 = tmp22 * tmp27 tmp29 = tmp28 * tmp0 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + (x0), tmp0, xmask) tl.store(in_out_ptr0 + (r3 + (64*x0)), tmp3, xmask) tl.store(out_ptr3 + (r3 + (64*x0)), tmp31, xmask) tl.store(out_ptr4 + (x0), tmp27, xmask) tl.store(out_ptr1 + (x0), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten._unsafe_index] stream0 = get_raw_stream(0) triton_poi_fused__unsafe_index_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 8, 1)) buf3 = empty_strided_cuda((16, ), (1, ), torch.float32) buf2 = buf1; del buf1 # reuse buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) buf8 = empty_strided_cuda((1, 16, 8, 8), (1024, 64, 8, 1), torch.float32) buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32) # Topologically Sorted Source Nodes: [x_1, x_3], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit] triton_per_fused__native_batch_norm_legit_convolution_repeat_1.run(buf2, primals_4, primals_3, primals_5, buf3, buf4, buf8, buf7, 16, 64, grid=grid(16), stream=stream0) del primals_3 del primals_4 del primals_5 return (reinterpret_tensor(buf8, (4, 4, 8, 8), (256, 64, 8, 1), 0), primals_2, buf0, buf2, buf3, reinterpret_tensor(buf7, (16, ), (1, ), 0), 
reinterpret_tensor(buf4, (1, 16, 1, 1), (16, 1, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class UNetUpsamplingBlock(nn.Module): def __init__(self, in_channels, out_channels): super(UNetUpsamplingBlock, self).__init__() params = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'bias': True} self.conv = nn.Conv2d(in_channels, out_channels, **params) self.relu = nn.ReLU(inplace=True) self.instance_norm = nn.InstanceNorm2d(out_channels, affine=True) def forward(self, x): x = F.interpolate(x, scale_factor=2, mode='nearest') x = self.conv(x) x = self.relu(x) x = self.instance_norm(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x4 = xindex tmp0 = x1 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x0 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x4, tmp9, xmask) @triton.jit def triton_per_fused__native_batch_norm_legit_convolution_repeat_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex r3 = rindex x1 = xindex % 4 tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_out_ptr0 + (r3 + 64 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + x0 % 4, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tl.full([1, 1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tl.where(xmask, tmp6, 0) tmp9 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp11 = tl.where(xmask, tmp9, 0) tmp12 = tl.sum(tmp11, 1)[:, None] tmp13 = tl.full([XBLOCK, 1], 64, tl.int32) tmp14 = tmp13.to(tl.float32) tmp15 = tmp12 / tmp14 tmp16 = tmp6 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.where(xmask, tmp18, 0) tmp21 = tl.sum(tmp20, 1)[:, None] tmp22 = tmp5 - tmp15 tmp23 = 64.0 tmp24 = tmp21 / tmp23 tmp25 = 1e-05 tmp26 = tmp24 + tmp25 tmp27 = libdevice.rsqrt(tmp26) tmp28 = tmp22 * tmp27 tmp29 = tmp28 * tmp0 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x0, tmp0, xmask) tl.store(in_out_ptr0 + (r3 + 64 * x0), tmp3, xmask) tl.store(out_ptr3 + (r3 + 64 * x0), tmp31, xmask) tl.store(out_ptr4 + x0, tmp27, xmask) tl.store(out_ptr1 + x0, tmp15, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_index_0[grid(1024)](primals_1, buf0, 1024, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, 
output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 8, 1)) buf3 = empty_strided_cuda((16,), (1,), torch.float32) buf2 = buf1 del buf1 buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) buf8 = empty_strided_cuda((1, 16, 8, 8), (1024, 64, 8, 1), torch. float32) buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32 ) triton_per_fused__native_batch_norm_legit_convolution_repeat_1[grid(16) ](buf2, primals_4, primals_3, primals_5, buf3, buf4, buf8, buf7, 16, 64, XBLOCK=8, num_warps=4, num_stages=1) del primals_3 del primals_4 del primals_5 return reinterpret_tensor(buf8, (4, 4, 8, 8), (256, 64, 8, 1), 0 ), primals_2, buf0, buf2, buf3, reinterpret_tensor(buf7, (16,), (1,), 0 ), reinterpret_tensor(buf4, (1, 16, 1, 1), (16, 1, 1, 1), 0) class UNetUpsamplingBlockNew(nn.Module): def __init__(self, in_channels, out_channels): super(UNetUpsamplingBlockNew, self).__init__() params = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'bias': True} self.conv = nn.Conv2d(in_channels, out_channels, **params) self.relu = nn.ReLU(inplace=True) self.instance_norm = nn.InstanceNorm2d(out_channels, affine=True) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_4 = self.instance_norm.weight primals_5 = self.instance_norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
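A hedged sketch for the upsampling block, same assumptions as the previous ones. The nearest-neighbour interpolation in the graph doubles both spatial dimensions, so the 4x4 sample input should come back as 8x8.

import torch

init_args, init_kwargs = get_init_inputs()
model = UNetUpsamplingBlockNew(*init_args, **init_kwargs).cuda()
x = get_inputs()[0].cuda()
with torch.no_grad():
    out = model(x)  # upsample -> conv -> relu -> instance norm, fused above
print(out.shape)  # expected: torch.Size([4, 4, 8, 8])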
TropComplique/bicycle-gan
UNetUpsamplingBlock
false
18,047
[ "MIT" ]
4
4bc8f4cdbe138e23c8a02c408cfb8e2ff7dfe6ab
https://github.com/TropComplique/bicycle-gan/tree/4bc8f4cdbe138e23c8a02c408cfb8e2ff7dfe6ab
_BahdanauAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/44/c44qgqrsnqzen4fn5emv6trgirlgxm3ednkk4by4h6c2vygx3iut.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%permute, %permute_1], 2), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x2 = (xindex // 32) x1 = (xindex // 8) % 4 x3 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x2) + x0), tmp4 & 
xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x2) + (16*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x3), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/cv/ccvvrbl5xq2dop2s3ilfb3hyf275whlxxdyq7j2gky5t2pg6gozw.py # Topologically Sorted Source Nodes: [energy], Original ATen: [aten.tanh] # Source node to ATen node mapping: # energy => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/wd/cwdcgou5wrlu7uqoc7vbrkfi6bm7v333yqp6vr3jxwiuzpl2ziqk.py # Topologically Sorted Source Nodes: [repeat_1], Original ATen: [aten.repeat] # Source node to ATen node mapping: # repeat_1 => repeat_1 # Graph fragment: # %repeat_1 : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_5, [4, 1]), kwargs = {}) triton_poi_fused_repeat_2 = async_compile.triton('triton_poi_fused_repeat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 
1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/us/cusyqra2bg2vbuuxs26cz3sihdy7z4jtgux6v3orbezw7iu5eot5.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 
= xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/wg/cwgoxrbhmwbtk44l4btvoekxvdbagjk6vkr5n42rshizrndijbnh.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) 
assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_2, primals_1, buf0, 128, grid=grid(128), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [energy], Original ATen: [aten.tanh] triton_poi_fused_tanh_1.run(buf2, primals_4, 64, grid=grid(64), stream=stream0) del primals_4 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [repeat_1], Original ATen: [aten.repeat] triton_poi_fused_repeat_2.run(primals_5, buf3, 16, grid=grid(16), stream=stream0) del primals_5 buf4 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [energy_2], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), out=buf4) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf4, buf5, 16, grid=grid(16), stream=stream0) buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf5, buf6, 16, grid=grid(16), stream=stream0) del buf5 return (reinterpret_tensor(buf6, (4, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf0, (16, 8), (8, 1), 0), buf2, buf6, reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch from torch import nn from torch.nn import functional class _BahdanauAttention(nn.Module): def __init__(self, method, hidden_size): super(_BahdanauAttention, self).__init__() self.method = method self.hidden_size = hidden_size self.attn = nn.Linear(self.hidden_size * 2, hidden_size) self.v = nn.Parameter(torch.rand(hidden_size)) stdv = 1.0 / math.sqrt(self.v.size(0)) self.v.data.normal_(mean=0, std=stdv) def forward(self, hidden, encoder_outputs, mask=None): """ :param hidden: previous hidden state of the decoder, in shape (layers*directions,B,H) :param encoder_outputs: encoder outputs from Encoder, in shape (T,B,H) :param mask: used for masking. NoneType or tensor in shape (B) indicating sequence length :return attention weights in shape (B,1,T) """ max_len = encoder_outputs.size(0) H = hidden.repeat(max_len, 1, 1).transpose(0, 1) encoder_outputs = encoder_outputs.transpose(0, 1) attn_energies = self.score(H, encoder_outputs) if mask is not None: attn_energies = attn_energies.masked_fill(mask, -1e+18) return functional.softmax(attn_energies, dim=1).unsqueeze(1) def score(self, hidden, encoder_outputs): energy = functional.tanh(self.attn(torch.cat([hidden, encoder_outputs], 2))) energy = energy.transpose(2, 1) v = self.v.repeat(encoder_outputs.data.shape[0], 1).unsqueeze(1) energy = torch.bmm(v, energy) return energy.squeeze(1) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'method': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math from torch import nn from torch.nn import functional assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x2 = xindex // 32 x1 = xindex // 8 % 4 x3 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x2 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x2 + 16 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, 
xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](primals_2, primals_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused_tanh_1[grid(64)](buf2, primals_4, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_4 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_repeat_2[grid(16)](primals_5, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 0, 1), 0 ), reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), out=buf4) buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_3[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0) del buf4 triton_poi_fused__softmax_4[grid(16)](buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf5 return reinterpret_tensor(buf6, (4, 1, 4), (4, 4, 1), 0 ), reinterpret_tensor(buf0, (16, 8), (8, 1), 0 ), buf2, buf6, reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 4), 0) class _BahdanauAttentionNew(nn.Module): def __init__(self, method, hidden_size): super(_BahdanauAttentionNew, self).__init__() self.method = method self.hidden_size = hidden_size self.attn = nn.Linear(self.hidden_size * 2, hidden_size) self.v = nn.Parameter(torch.rand(hidden_size)) stdv = 1.0 / math.sqrt(self.v.size(0)) self.v.data.normal_(mean=0, std=stdv) def score(self, hidden, encoder_outputs): energy = functional.tanh(self.attn(torch.cat([hidden, encoder_outputs], 2))) energy = energy.transpose(2, 1) v = self.v.repeat(encoder_outputs.data.shape[0], 1).unsqueeze(1) energy = torch.bmm(v, energy) return energy.squeeze(1) def forward(self, input_0, input_1): primals_4 = self.v primals_3 = self.attn.weight primals_5 = self.attn.bias primals_2 = input_0 primals_1 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
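A hedged sketch for the attention pair, same assumptions. Per get_inputs and the compiled forward, the first argument is the decoder hidden state (B,H) and the second the encoder outputs (T,B,H).

import torch

init_args, init_kwargs = get_init_inputs()
model = _BahdanauAttentionNew(*init_args, **init_kwargs).cuda()
hidden, encoder_outputs = [t.cuda() for t in get_inputs()]
with torch.no_grad():
    weights = model(hidden, encoder_outputs)
print(weights.shape)  # expected: torch.Size([4, 1, 4]); each (1, 4) row sums to 1 after softmax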
VarnithChordia/Multlingual_Punctuation_restoration
_BahdanauAttention
false
18,048
[ "MIT" ]
8
17c026e8935b9fecae01d446a756926c7733fcd1
https://github.com/VarnithChordia/Multlingual_Punctuation_restoration/tree/17c026e8935b9fecae01d446a756926c7733fcd1
loss_shape_exp
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_2/inductor_cache/22/c222mpc7j2xclggovid57kukvuhjoju3d36smxw7jy4dl7wva5qp.py
# Topologically Sorted Source Nodes: [mul, exp, sub, pow_1, mul_1, mean], Original ATen: [aten.mul, aten.exp, aten.sub, aten.pow, aten.mean]
# Source node to ATen node mapping:
#   exp => exp
#   mean => mean
#   mul => mul
#   mul_1 => mul_1
#   pow_1 => pow_1
#   sub => sub
# Graph fragment:
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 2), kwargs = {})
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %pow_1), kwargs = {})
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_1,), kwargs = {})
triton_per_fused_exp_mean_mul_pow_sub_0 = async_compile.triton('triton_per_fused_exp_mean_mul_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 256],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_exp_mean_mul_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_exp_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
    xnumel = 1
    XBLOCK: tl.constexpr = 1
    rnumel = 256
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    xmask = tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    roffset = 0
    rmask = tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (r0), None)
    tmp4 = tl.load(in_ptr1 + (r0), None)
    tmp1 = 2.0
    tmp2 = tmp0 * tmp1
    tmp3 = tl_math.exp(tmp2)
    tmp5 = tmp4 - tmp0
    tmp6 = tmp5 * tmp5
    tmp7 = tmp3 * tmp6
    tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
    tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
    tmp11 = 256.0
    tmp12 = tmp10 / tmp11
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [mul, exp, sub, pow_1, mul_1, mean], Original ATen: [aten.mul, aten.exp, aten.sub, aten.pow, aten.mean]
        stream0 = get_raw_stream(0)
        triton_per_fused_exp_mean_mul_pow_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf1, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class loss_shape_exp(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, x, y, beta=2):
        return torch.mean(torch.exp(beta * y) * torch.pow(x - y, 2))


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
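The module above is an exponentially weighted mean squared error, mean(exp(beta * y) * (x - y) ** 2): squared errors at positions where the target y is large are penalized more heavily. A minimal illustration of that weighting (the tensor values here are toy inputs, not from the repo):

import torch

loss = loss_shape_exp()
x = torch.tensor([0.5, 0.5])
y = torch.tensor([0.0, 1.0])
# Both positions have the same squared error 0.25, but the second target is
# larger, so its contribution is scaled by exp(2 * 1.0) ~= 7.39 instead of exp(0) = 1.
per_elem = torch.exp(2 * y) * (x - y) ** 2
assert torch.allclose(loss(x, y), per_elem.mean())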
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_exp_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
        xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp4 = tl.load(in_ptr1 + r0, None)
    tmp1 = 2.0
    tmp2 = tmp0 * tmp1
    tmp3 = tl_math.exp(tmp2)
    tmp5 = tmp4 - tmp0
    tmp6 = tmp5 * tmp5
    tmp7 = tmp3 * tmp6
    tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
    tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
    tmp11 = 256.0
    tmp12 = tmp10 / tmp11
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_exp_mean_mul_pow_sub_0[grid(1)](buf1, arg0_1,
            arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class loss_shape_expNew(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
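A minimal sanity check of the compiled wrapper above, assuming a CUDA device is available. It mirrors the kernel body, which loads the first argument as tmp0 and reduces exp(2 * tmp0) * (tmp1 - tmp0) ** 2 over all 256 elements in a single persistent-reduction block:

import torch

a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
expected = torch.mean(torch.exp(2 * a) * (b - a) ** 2)
# call() clears the list it is handed, so pass a fresh one each time.
actual, = call([a, b])
torch.testing.assert_close(actual, expected)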
Tsinghua-gongjing/StructureImpute
loss_shape_exp
false
18049
[ "MIT" ]
9
59e33e913998a8841c2cb552828f0f0cc19ebc21
https://github.com/Tsinghua-gongjing/StructureImpute/tree/59e33e913998a8841c2cb552828f0f0cc19ebc21
ResBlk
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_2/inductor_cache/cs/ccsc7yopxjonfnc354tpnjrivhnijup7yf4q33essmzkn5qvr4md.py
# Topologically Sorted Source Nodes: [pow_1, mean, add, rsqrt, x, mul_1, add_1, x_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.rsqrt, aten.mul, aten.maximum]
# Source node to ATen node mapping:
#   add => add
#   add_1 => add_1
#   mean => mean
#   mul_1 => mul_1
#   pow_1 => pow_1
#   rsqrt => rsqrt
#   x => mul
#   x_1 => maximum
# Graph fragment:
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [2, 3], True), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-05), kwargs = {})
#   %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %rsqrt), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %mul), kwargs = {})
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
#   %maximum : [num_users=2] = call_function[target=torch.ops.aten.maximum.default](args = (%add_1, %primals_4), kwargs = {})
triton_per_fused_add_maximum_mean_mul_pow_rsqrt_0 = async_compile.triton('triton_per_fused_add_maximum_mean_mul_pow_rsqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[16, 16],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_maximum_mean_mul_pow_rsqrt_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_maximum_mean_mul_pow_rsqrt_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    rnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    x2 = xindex % 4
    tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
    tmp11 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
    tmp1 = tmp0 * tmp0
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.where(xmask, tmp2, 0)
    tmp5 = tl.sum(tmp4, 1)[:, None]
    tmp6 = 16.0
    tmp7 = tmp5 / tmp6
    tmp8 = 1e-05
    tmp9 = tmp7 + tmp8
    tmp10 = libdevice.rsqrt(tmp9)
    tmp12 = tmp0 * tmp10
    tmp13 = tmp11 * tmp12
    tmp15 = tmp13 + tmp14
    tmp17 = triton_helpers.maximum(tmp15, tmp16)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (x0), tmp10, xmask)
    tl.store(out_ptr0 + (r1 + (16*x0)), tmp17, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/il/cil2cigexrsehthna2cae6knnqgrhgdaoj7rd7bfw2o7gqhugf4t.py
# Topologically Sorted Source Nodes: [x_2, pow_2, mean_1, add_2, rsqrt_1, x_3, mul_3, add_3, x_4], Original ATen: [aten.convolution, aten.pow, aten.mean, aten.add, aten.rsqrt, aten.mul, aten.maximum]
# Source node to ATen node mapping:
#   add_2 => add_2
#   add_3 => add_3
#   mean_1 => mean_1
#   mul_3 => mul_3
#   pow_2 => pow_2
#   rsqrt_1 => rsqrt_1
#   x_2 => convolution
#   x_3 => mul_2
#   x_4 => maximum_1
# Graph fragment:
#   %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%maximum, %primals_5, %primals_6, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%convolution, 2), kwargs = {})
#   %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_2, [2, 3], True), kwargs = {})
#   %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-05), kwargs = {})
#   %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %rsqrt_1), kwargs = {})
#   %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_7, %mul_2), kwargs = {})
#   %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_8), kwargs = {})
#   %maximum_1 : [num_users=2] = call_function[target=torch.ops.aten.maximum.default](args = (%add_3, %primals_9), kwargs = {})
triton_per_fused_add_convolution_maximum_mean_mul_pow_rsqrt_1 = async_compile.triton('triton_per_fused_add_convolution_maximum_mean_mul_pow_rsqrt_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[16, 16],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_convolution_maximum_mean_mul_pow_rsqrt_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_convolution_maximum_mean_mul_pow_rsqrt_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    rnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    roffset = 0
    rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x3 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = 16.0
    tmp9 = tmp7 / tmp8
    tmp10 = 1e-05
    tmp11 = tmp9 + tmp10
    tmp12 = libdevice.rsqrt(tmp11)
    tmp14 = tmp2 * tmp12
    tmp15 = tmp13 * tmp14
    tmp17 = tmp15 + tmp16
    tmp19 = triton_helpers.maximum(tmp17, tmp18)
    tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask)
    tl.debug_barrier()
    tl.store(in_out_ptr1 + (x3), tmp12, xmask)
    tl.store(out_ptr0 + (r2 + (16*x3)), tmp19, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_2/inductor_cache/wu/cwu4aunzr3ywj24slclusvueucyumpdk3xwdnttwgdrxicosuhz6.py
# Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
#   x_5 => convolution_1
#   x_6 => add_4
# Graph fragment:
#   %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%maximum_1, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_1), kwargs = {})
triton_poi_fused_add_convolution_2 = async_compile.triton('triton_poi_fused_add_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 16) % 4
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (x3), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_6, (4, ), (1, ))
    assert_size_stride(primals_7, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_8, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_9, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_10, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_11, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0  # reuse
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [pow_1, mean, add, rsqrt, x, mul_1, add_1, x_1], Original ATen: [aten.pow, aten.mean, aten.add, aten.rsqrt, aten.mul, aten.maximum]
        stream0 = get_raw_stream(0)
        triton_per_fused_add_maximum_mean_mul_pow_rsqrt_0.run(buf1, primals_1, primals_2, primals_3, primals_4, buf2, 16, 16, grid=grid(16), stream=stream0)
        # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
        buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
        buf4 = buf3; del buf3  # reuse
        buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        buf6 = reinterpret_tensor(buf5, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf5  # reuse
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_2, pow_2, mean_1, add_2, rsqrt_1, x_3, mul_3, add_3, x_4], Original ATen: [aten.convolution, aten.pow, aten.mean, aten.add, aten.rsqrt, aten.mul, aten.maximum]
        triton_per_fused_add_convolution_maximum_mean_mul_pow_rsqrt_1.run(buf4, buf6, primals_6, primals_7, primals_8, primals_9, buf7, 16, 16, grid=grid(16), stream=stream0)
        del primals_6
        # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution]
        buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1))
        buf9 = buf8; del buf8  # reuse
        # Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.convolution, aten.add]
        triton_poi_fused_add_convolution_2.run(buf9, primals_11, primals_1, 256, grid=grid(256), stream=stream0)
        del primals_11
    return (buf9, primals_1, primals_2, primals_3, primals_4, primals_5, primals_7, primals_8, primals_9, primals_10, buf1, buf2, buf4, buf6, buf7, )

def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_8 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_9 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_10 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
    return print_performance(fn, times=times, repeat=repeat)

if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class FRN(nn.Module):

    def __init__(self, num_features, eps=1e-05):
        super(FRN, self).__init__()
        self.tau = nn.Parameter(torch.zeros(1, num_features, 1, 1))
        self.gamma = nn.Parameter(torch.ones(1, num_features, 1, 1))
        self.beta = nn.Parameter(torch.zeros(1, num_features, 1, 1))
        self.eps = eps

    def forward(self, x):
        x = x * torch.rsqrt(torch.mean(x ** 2, dim=[2, 3], keepdim=True) + self.eps)
        return torch.max(self.gamma * x + self.beta, self.tau)


class ResBlk(nn.Module):
    """Preactivation residual block with filter response norm."""

    def __init__(self, dim_in, dim_out, style_dim=64, downsample=False):
        super(ResBlk, self).__init__()
        self.conv1 = nn.Conv2d(dim_in, dim_in, 3, 1, 1)
        self.downsample = downsample
        self.conv2 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)
        self.norm1 = FRN(dim_in)
        self.norm2 = FRN(dim_in)
        if self.downsample:
            self.conv1x1 = nn.Conv2d(dim_in, dim_out, 4, 2, 1)

    def _shortcut(self, x):
        if self.downsample:
            x = self.conv1x1(x)
        return x

    def _residual(self, x):
        x = self.norm1(x)
        x = self.conv1(x)
        x = self.norm2(x)
        if self.downsample:
            x = F.avg_pool2d(x, 2)
        x = self.conv2(x)
        return x

    def forward(self, x):
        x = self._residual(x) + self._shortcut(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dim_in': 4, 'dim_out': 4}]
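A short eager walk-through of the FRN layer defined above: each channel is divided by its root mean square over the spatial dimensions, then passed through the learned thresholded activation max(gamma * x_hat + beta, tau). This sketch only restates the forward pass step by step to make the two stages explicit:

import torch

frn = FRN(num_features=4)
x = torch.rand(4, 4, 4, 4)
nu2 = x.pow(2).mean(dim=[2, 3], keepdim=True)  # mean square per (sample, channel)
x_hat = x * torch.rsqrt(nu2 + frn.eps)         # RMS-normalized activations
manual = torch.max(frn.gamma * x_hat + frn.beta, frn.tau)
torch.testing.assert_close(frn(x), manual)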
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_add_maximum_mean_mul_pow_rsqrt_0(in_out_ptr0, in_ptr0,
        in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    x2 = xindex % 4
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp11 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
    tmp1 = tmp0 * tmp0
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp4 = tl.where(xmask, tmp2, 0)
    tmp5 = tl.sum(tmp4, 1)[:, None]
    tmp6 = 16.0
    tmp7 = tmp5 / tmp6
    tmp8 = 1e-05
    tmp9 = tmp7 + tmp8
    tmp10 = libdevice.rsqrt(tmp9)
    tmp12 = tmp0 * tmp10
    tmp13 = tmp11 * tmp12
    tmp15 = tmp13 + tmp14
    tmp17 = triton_helpers.maximum(tmp15, tmp16)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp10, xmask)
    tl.store(out_ptr0 + (r1 + 16 * x0), tmp17, xmask)


@triton.jit
def triton_per_fused_add_convolution_maximum_mean_mul_pow_rsqrt_1(in_out_ptr0,
        in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel,
        rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x3 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tmp2 * tmp2
    tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
    tmp6 = tl.where(xmask, tmp4, 0)
    tmp7 = tl.sum(tmp6, 1)[:, None]
    tmp8 = 16.0
    tmp9 = tmp7 / tmp8
    tmp10 = 1e-05
    tmp11 = tmp9 + tmp10
    tmp12 = libdevice.rsqrt(tmp11)
    tmp14 = tmp2 * tmp12
    tmp15 = tmp13 * tmp14
    tmp17 = tmp15 + tmp16
    tmp19 = triton_helpers.maximum(tmp17, tmp18)
    tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
    tl.debug_barrier()
    tl.store(in_out_ptr1 + x3, tmp12, xmask)
    tl.store(out_ptr0 + (r2 + 16 * x3), tmp19, xmask)


@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x3, xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_8, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_9, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_10, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_11, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
        del buf0
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_per_fused_add_maximum_mean_mul_pow_rsqrt_0[grid(16)](buf1,
            primals_1, primals_2, primals_3, primals_4, buf2, 16, 16,
            XBLOCK=8, num_warps=2, num_stages=1)
        buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
        buf4 = buf3
        del buf3
        buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        buf6 = reinterpret_tensor(buf5, (4, 4, 1, 1), (4, 1, 1, 1), 0)
        del buf5
        buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_per_fused_add_convolution_maximum_mean_mul_pow_rsqrt_1[grid(16)
            ](buf4, buf6, primals_6, primals_7, primals_8, primals_9, buf7,
            16, 16, XBLOCK=8, num_warps=2, num_stages=1)
        del primals_6
        buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1))
        buf9 = buf8
        del buf8
        triton_poi_fused_add_convolution_2[grid(256)](buf9, primals_11,
            primals_1, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_11
    return (buf9, primals_1, primals_2, primals_3, primals_4, primals_5,
        primals_7, primals_8, primals_9, primals_10, buf1, buf2, buf4,
        buf6, buf7)


class FRN(nn.Module):

    def __init__(self, num_features, eps=1e-05):
        super(FRN, self).__init__()
        self.tau = nn.Parameter(torch.zeros(1, num_features, 1, 1))
        self.gamma = nn.Parameter(torch.ones(1, num_features, 1, 1))
        self.beta = nn.Parameter(torch.zeros(1, num_features, 1, 1))
        self.eps = eps

    def forward(self, x):
        x = x * torch.rsqrt(torch.mean(x ** 2, dim=[2, 3], keepdim=True) + self.eps)
        return torch.max(self.gamma * x + self.beta, self.tau)


class ResBlkNew(nn.Module):
    """Preactivation residual block with filter response norm."""

    def __init__(self, dim_in, dim_out, style_dim=64, downsample=False):
        super(ResBlkNew, self).__init__()
        self.conv1 = nn.Conv2d(dim_in, dim_in, 3, 1, 1)
        self.downsample = downsample
        self.conv2 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)
        self.norm1 = FRN(dim_in)
        self.norm2 = FRN(dim_in)
        if self.downsample:
            self.conv1x1 = nn.Conv2d(dim_in, dim_out, 4, 2, 1)

    def _shortcut(self, x):
        if self.downsample:
            x = self.conv1x1(x)
        return x

    def _residual(self, x):
        x = self.norm1(x)
        x = self.conv1(x)
        x = self.norm2(x)
        if self.downsample:
            x = F.avg_pool2d(x, 2)
        x = self.conv2(x)
        return x

    def forward(self, input_0):
        primals_5 = self.conv1.weight
        primals_6 = self.conv1.bias
        primals_10 = self.conv2.weight
        primals_11 = self.conv2.bias
        primals_2 = self.norm1.tau
        primals_3 = self.norm1.gamma
        primals_4 = self.norm1.beta
        primals_7 = self.norm2.tau
        primals_8 = self.norm2.gamma
        primals_9 = self.norm2.beta
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0]
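A minimal parity check, assuming a CUDA device and that the eager ResBlk from the previous listing is importable alongside ResBlkNew. The two modules share the same parameter layout, so copying the state dict should make the fused kernels reproduce the eager forward pass up to floating-point tolerance:

import torch

eager = ResBlk(dim_in=4, dim_out=4).cuda()
fused = ResBlkNew(dim_in=4, dim_out=4).cuda()
fused.load_state_dict(eager.state_dict())  # identical parameter names and shapes
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(fused(x), eager(x), rtol=1e-4, atol=1e-4)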
UdonDa/StarGAN-v2-pytorch-nonofficial
ResBlk
false
18050
[ "MIT" ]
9
219df6b7fd4bd533686e2093ee914a337914ca9b
https://github.com/UdonDa/StarGAN-v2-pytorch-nonofficial/tree/219df6b7fd4bd533686e2093ee914a337914ca9b
TransformerEncoderLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/ao/caohakair5wxirtyoqa77ignbks5astbylffylea37sbctqafycj.py # Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_2 => clone_2 # Graph fragment: # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%getitem_2,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (8 + x0 + (12*x1)), xmask) tmp1 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') 
tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/35/c35bozqtoagsjyq4q26yxatyisz7ai5h2jopctndu5cellhkrgrk.py # Topologically Sorted Source Nodes: [q_2], Original ATen: [aten.div] # Source node to ATen node mapping: # q_2 => div # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_1, 1.0), kwargs = {}) triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + ((12*(x0 // 4)) + (48*x1) + (x0 % 4)), xmask) tmp1 = tl.load(in_ptr1 + (x2 % 4), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/5b/c5blzob5s6pwc6r2g3sgarsune3okwcbcukrrjv4nibbjaoxrmbp.py # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_1 => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%getitem_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + (12*x1)), xmask) tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/jz/cjzbdxtrpgwr7evumvox4ybrvnpkv76nymg6kalovwfexz4al2ec.py # Topologically Sorted Source Nodes: [attn_1, attn_2], Original ATen: [aten.add, aten._softmax] # Source node to ATen node mapping: # attn_1 => add # attn_2 => amax, exp, sub # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%bmm, 0.0), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_add_3 = async_compile.triton('triton_poi_fused__softmax_add_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_3(in_ptr0, out_ptr0, xnumel, XBLOCK : 
tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp6 = tmp5 + tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 + tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 + tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tl_math.exp(tmp14) tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/tp/ctpsln2dva6cest7g4ejturc47z2eqojdn7vngubqnblavq4jihj.py # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_2 => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_2/inductor_cache/53/c53s7jioa243frefufoxqblcdbhb7rnhhkzrnsx4vvlhishas7iw.py # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_3 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_5,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/eq/ceqkt22etynsgdiire3npctu6ae2jyxstmwvc6an3vghfqql2yu2.py # Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # src => add_1 # src_1 => var_mean # Graph fragment: # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_7), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_1, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], 
filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/6z/c6zahf5ye65saqqmps3f6q6adeffviegsxonkuv73ylppfyxij3s.py # Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # src => add_1 # src_1 => add_2, add_3, mul, mul_1, rsqrt, sub_1 # Graph fragment: # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_7), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_3, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %getitem_4), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_6), kwargs = {}) # %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_7), kwargs = {}) 
triton_poi_fused_add_native_layer_norm_7 = async_compile.triton('triton_poi_fused_add_native_layer_norm_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/2n/c2n433qdiqanpisgdauyptqbsgl2o66klg3set562nxnce5boxvl.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_10,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_8 = async_compile.triton('triton_poi_fused_relu_threshold_backward_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 
'*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_8(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/7f/c7fwageohz6hwjegitb6j3yeg7h5eh4dqlekvyojy3gcwakmyyx2.py # Topologically Sorted Source Nodes: [src_2], Original ATen: [aten.add] # Source node to ATen node mapping: # src_2 => add_4 # Graph fragment: # %add_4 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %view_12), kwargs = {}) triton_poi_fused_add_9 = async_compile.triton('triton_poi_fused_add_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/x5/cx5gtyeablavvxuctulbbmxt6iktkzzq7jji7e3b4efuwhs7j2eu.py # Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # src_3 => add_5, rsqrt_1, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_4, [2]), kwargs = {correction: 0, keepdim: True}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_5, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_5,), kwargs = {}) triton_poi_fused_native_layer_norm_10 = async_compile.triton('triton_poi_fused_native_layer_norm_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/vn/cvn6wpzho3qxzbnigol4pvjqtdlc2j4ikddxhcpnyvd73zs7v6ih.py # Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # src_3 => add_5, add_6, mul_2, mul_3, rsqrt_1, sub_2, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_4, [2]), kwargs = {correction: 0, keepdim: True}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_5, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_5,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_4, %getitem_6), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_12), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_13), kwargs = {}) triton_poi_fused_native_layer_norm_11 = async_compile.triton('triton_poi_fused_native_layer_norm_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): 
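    # The thirteen args are the layer input (primals_1, shape (4, 4, 4))
    # followed by the attention in-projection and out-projection weights and
    # biases, the feed-forward weights and biases, and the two LayerNorm
    # affine parameters; the ordering is fixed by the wrapper that builds
    # this argument list before invoking call(...).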
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) assert_size_stride(primals_3, (12, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (2048, 4), (4, 1)) assert_size_stride(primals_9, (2048, ), (1, )) assert_size_stride(primals_10, (4, 2048), (2048, 1)) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, ), (1, )) assert_size_stride(primals_13, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf0, primals_3, buf1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((16, 4, 1), (1, 16, 64), torch.float32) # Topologically Sorted Source Nodes: [q_2], Original ATen: [aten.div] triton_poi_fused_div_1.run(buf0, primals_3, buf2, 64, grid=grid(64), stream=stream0) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf0, primals_3, buf3, 64, grid=grid(64), stream=stream0) del buf0 del primals_3 buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm] extern_kernels.bmm(buf2, reinterpret_tensor(buf3, (16, 1, 4), (1, 0, 16), 0), out=buf4) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_1, attn_2], Original ATen: [aten.add, aten._softmax] triton_poi_fused__softmax_add_3.run(buf4, buf5, 256, grid=grid(256), stream=stream0) buf6 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf5, buf6, 256, grid=grid(256), stream=stream0) del buf5 buf7 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm] extern_kernels.bmm(buf6, reinterpret_tensor(buf1, (16, 4, 1), (1, 16, 0), 0), out=buf7) buf8 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] triton_poi_fused_clone_5.run(buf7, buf8, 4, 16, grid=grid(4, 16), stream=stream0) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [attn_output_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9) del primals_5 buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm] 
triton_poi_fused_add_native_layer_norm_6.run(primals_1, buf9, buf10, buf11, 16, grid=grid(16), stream=stream0) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_7.run(primals_1, buf9, buf10, buf11, primals_6, primals_7, buf12, 64, grid=grid(64), stream=stream0) del primals_7 buf13 = empty_strided_cuda((16, 2048), (2048, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 2048), (1, 4), 0), out=buf13) buf14 = reinterpret_tensor(buf13, (4, 4, 2048), (8192, 2048, 1), 0); del buf13 # reuse buf20 = empty_strided_cuda((4, 4, 2048), (8192, 2048, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_8.run(buf14, primals_9, buf20, 32768, grid=grid(32768), stream=stream0) del primals_9 buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf14, (16, 2048), (2048, 1), 0), reinterpret_tensor(primals_10, (2048, 4), (1, 2048), 0), out=buf15) buf16 = reinterpret_tensor(buf15, (4, 4, 4), (16, 4, 1), 0); del buf15 # reuse # Topologically Sorted Source Nodes: [src_2], Original ATen: [aten.add] triton_poi_fused_add_9.run(buf16, buf12, primals_11, 64, grid=grid(64), stream=stream0) del primals_11 buf17 = buf11; del buf11 # reuse buf18 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_10.run(buf16, buf17, buf18, 16, grid=grid(16), stream=stream0) buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_11.run(buf16, buf17, buf18, primals_12, primals_13, buf19, 64, grid=grid(64), stream=stream0) del buf17 del buf18 del primals_13 return (buf19, primals_1, primals_6, primals_12, buf6, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), buf9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(buf14, (16, 2048), (2048, 1), 0), buf16, primals_10, buf20, primals_8, primals_4, reinterpret_tensor(buf1, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf2, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), 
device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
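The harness above times the fused kernels end to end. As a smaller illustration, here is a minimal, self-contained sketch (kernel name and test sizes are hypothetical, not part of the generated module; it assumes a CUDA device with Triton installed) of the residual-plus-bias pattern that triton_poi_fused_add_9 computes, out[i] = residual[i] + (out[i] + bias[i % 4]), verified against plain PyTorch:

import torch
import triton
import triton.language as tl


@triton.jit
def residual_bias_add(in_out_ptr, res_ptr, bias_ptr, xnumel, XBLOCK: tl.constexpr):
    # One program covers XBLOCK consecutive elements; xmask guards the tail.
    xindex = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)
    xmask = xindex < xnumel
    val = tl.load(in_out_ptr + xindex, xmask)
    res = tl.load(res_ptr + xindex, xmask)
    bias = tl.load(bias_ptr + xindex % 4, xmask, eviction_policy='evict_last')
    # Same dataflow as triton_poi_fused_add_9: residual + (linear output + bias).
    tl.store(in_out_ptr + xindex, res + (val + bias), xmask)


x = torch.randn(64, device='cuda')
res = torch.randn(64, device='cuda')
bias = torch.randn(4, device='cuda')
expected = res + (x + bias.repeat(16))
residual_bias_add[(1,)](x, res, bias, 64, XBLOCK=64)
torch.testing.assert_close(x, expected)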
import math import torch import warnings from torch import Tensor import torch.nn as nn from torch.nn.parameter import Parameter from torch.nn.init import xavier_uniform_ from torch.nn.init import constant_ import torch.nn.functional as F from typing import Optional from typing import Tuple from typing import List def _in_projection_packed(q: 'Tensor', k: 'Tensor', v: 'Tensor', w: 'Tensor', b: 'Optional[Tensor]'=None) ->List[Tensor]: E = q.size(-1) if k is v: if q is k: return F.linear(q, w, b).chunk(3, dim=-1) else: w_q, w_kv = w.split([E, E * 2]) if b is None: b_q = b_kv = None else: b_q, b_kv = b.split([E, E * 2]) return (F.linear(q, w_q, b_q),) + F.linear(k, w_kv, b_kv).chunk( 2, dim=-1) else: w_q, w_k, w_v = w.chunk(3) if b is None: b_q = b_k = b_v = None else: b_q, b_k, b_v = b.chunk(3) return F.linear(q, w_q, b_q), F.linear(k, w_k, b_k), F.linear(v, w_v, b_v) def scale_dot_attention(q: 'Tensor', k: 'Tensor', v: 'Tensor', dropout_p: 'float'=0.0, attn_mask: 'Optional[Tensor]'=None) ->Tuple[Tensor, Tensor]: _, _, E = q.shape q = q / math.sqrt(E) attn = torch.bmm(q, k.transpose(-2, -1)) if attn_mask is not None: attn = attn + attn_mask attn = F.softmax(attn, dim=-1) if dropout_p: attn = F.dropout(attn, p=dropout_p) out = torch.bmm(attn, v) return out, attn def multi_head_attention_forward(query: 'Tensor', key: 'Tensor', value: 'Tensor', num_heads: 'int', in_proj_weight: 'Tensor', in_proj_bias: 'Optional[Tensor]', dropout_p: 'float', out_proj_weight: 'Tensor', out_proj_bias: 'Optional[Tensor]', training: 'bool'=True, key_padding_mask: 'Optional[Tensor]'=None, need_weights: 'bool'=True, attn_mask: 'Optional[Tensor]'=None, use_separate_proj_weight=None, q_proj_weight: 'Optional[Tensor]'=None, k_proj_weight: 'Optional[Tensor]'=None, v_proj_weight: 'Optional[Tensor]'=None) ->Tuple[ Tensor, Optional[Tensor]]: tgt_len, bsz, embed_dim = query.shape src_len, _, _ = key.shape head_dim = embed_dim // num_heads q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias) if attn_mask is not None: if attn_mask.dtype == torch.uint8: warnings.warn( 'Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.' ) attn_mask = attn_mask else: assert attn_mask.is_floating_point( ) or attn_mask.dtype == torch.bool, f'Only float, byte, and bool types are supported for attn_mask, not {attn_mask.dtype}' if attn_mask.dim() == 2: correct_2d_size = tgt_len, src_len if attn_mask.shape != correct_2d_size: raise RuntimeError( f'The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}.' ) attn_mask = attn_mask.unsqueeze(0) elif attn_mask.dim() == 3: correct_3d_size = bsz * num_heads, tgt_len, src_len if attn_mask.shape != correct_3d_size: raise RuntimeError( f'The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}.' ) else: raise RuntimeError( f"attn_mask's dimension {attn_mask.dim()} is not supported") if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8: warnings.warn( 'Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.' 
            )
        key_padding_mask = key_padding_mask
    q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
    k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
    v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
    if key_padding_mask is not None:
        assert key_padding_mask.shape == (bsz, src_len
            ), f'expecting key_padding_mask shape of {bsz, src_len}, but got {key_padding_mask.shape}'
        key_padding_mask = key_padding_mask.view(bsz, 1, 1, src_len).expand(
            -1, num_heads, -1, -1).reshape(bsz * num_heads, 1, src_len)
        if attn_mask is None:
            attn_mask = key_padding_mask
        elif attn_mask.dtype == torch.bool:
            attn_mask = attn_mask.logical_or(key_padding_mask)
        else:
            attn_mask = attn_mask.masked_fill(key_padding_mask, float('-inf'))
    if attn_mask is not None and attn_mask.dtype == torch.bool:
        new_attn_mask = torch.zeros_like(attn_mask, dtype=torch.float)
        new_attn_mask.masked_fill_(attn_mask, float('-inf'))
        attn_mask = new_attn_mask
    if not training:
        dropout_p = 0.0
    # Keyword arguments here: scale_dot_attention is declared as
    # (q, k, v, dropout_p, attn_mask), so passing (attn_mask, dropout_p)
    # positionally would silently swap the mask and the dropout rate.
    attn_output, attn_output_weights = scale_dot_attention(q, k, v,
        dropout_p=dropout_p, attn_mask=attn_mask)
    attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len,
        bsz, embed_dim)
    attn_output = nn.functional.linear(attn_output, out_proj_weight,
        out_proj_bias)
    if need_weights:
        attn_output_weights = attn_output_weights.view(bsz, num_heads,
            tgt_len, src_len)
        return attn_output, attn_output_weights.sum(dim=1) / num_heads
    else:
        return attn_output, None


def positional_encoding(X, num_features, dropout_p=0.1, max_len=512) ->Tensor:
    # No-op placeholder: the Dropout module is built and immediately
    # discarded, and X is returned unchanged.
    nn.Dropout(dropout_p)
    return X


class MultiheadAttention(nn.Module):

    def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True, kdim=
        None, vdim=None, batch_first=False) ->None:
        super(MultiheadAttention, self).__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self._qkv_same_embed_dim = (self.kdim == self.embed_dim and self.
vdim == self.embed_dim) self.num_heads = num_heads self.dropout = dropout self.batch_first = batch_first self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' if self._qkv_same_embed_dim is False: self.q_proj_weight = Parameter(torch.empty((embed_dim, embed_dim))) self.k_proj_weight = Parameter(torch.empty((embed_dim, self.kdim))) self.v_proj_weight = Parameter(torch.empty((embed_dim, self.vdim))) self.register_parameter('in_proj_weight', None) else: self.in_proj_weight = Parameter(torch.empty((3 * embed_dim, embed_dim))) self.register_parameter('q_proj_weight', None) self.register_parameter('k_proj_weight', None) self.register_parameter('v_proj_weight', None) if bias: self.in_proj_bias = Parameter(torch.empty(3 * embed_dim)) else: self.register_parameter('in_proj_bias', None) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self._reset_parameters() def _reset_parameters(self): if self._qkv_same_embed_dim: xavier_uniform_(self.in_proj_weight) else: xavier_uniform_(self.q_proj_weight) xavier_uniform_(self.k_proj_weight) xavier_uniform_(self.v_proj_weight) if self.in_proj_bias is not None: constant_(self.in_proj_bias, 0.0) constant_(self.out_proj.bias, 0.0) def forward(self, query: 'Tensor', key: 'Tensor', value: 'Tensor', key_padding_mask: 'Optional[Tensor]'=None, need_weights: 'bool'= True, attn_mask: 'Optional[Tensor]'=None) ->Tuple[Tensor, Optional[ Tensor]]: if self.batch_first: query, key, value = [x.transpose(1, 0) for x in (query, key, value) ] if not self._qkv_same_embed_dim: attn_output, attn_output_weights = multi_head_attention_forward( query, key, value, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.dropout, self.out_proj.weight, self .out_proj.bias, training=self.training, key_padding_mask= key_padding_mask, need_weights=need_weights, attn_mask= attn_mask, use_separate_proj_weight=True, q_proj_weight= self.q_proj_weight, k_proj_weight=self.k_proj_weight, v_proj_weight=self.v_proj_weight) else: attn_output, attn_output_weights = multi_head_attention_forward( query, key, value, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.dropout, self.out_proj.weight, self .out_proj.bias, training=self.training, key_padding_mask= key_padding_mask, need_weights=need_weights, attn_mask= attn_mask) if self.batch_first: return attn_output.transpose(1, 0), attn_output_weights else: return attn_output, attn_output_weights class TransformerEncoderLayer(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=F.relu, layer_norm_eps=1e-05, batch_first=False) ->None: super(TransformerEncoderLayer, self).__init__() self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.activation = activation def forward(self, src: 'Tensor', src_mask: 'Optional[Tensor]'=None, src_key_padding_mask: 'Optional[Tensor]'=None) ->Tensor: src = positional_encoding(src, src.shape[-1]) src2 = self.self_attn(src, src, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0] src = src + self.dropout1(src2) src = self.norm1(src) src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) src = src + 
self.dropout2(src2)  # dropout2, not dropout: self.dropout already acts inside the feed-forward block
        src = self.norm2(src)
        return src


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'nhead': 4}]
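For reference, a hypothetical smoke test (not part of the record) that wires the two helpers above together the way the benchmark harness does:

import torch

init_args, init_kwargs = get_init_inputs()
layer = TransformerEncoderLayer(*init_args, **init_kwargs)
layer.eval()  # disable dropout so repeated runs agree
with torch.no_grad():
    out = layer(*get_inputs())
print(out.shape)  # torch.Size([4, 4, 4])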
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import warnings from torch import Tensor import torch.nn as nn from torch.nn.parameter import Parameter from torch.nn.init import xavier_uniform_ from torch.nn.init import constant_ import torch.nn.functional as F from typing import Optional from typing import Tuple from typing import List assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask) tmp1 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (12 * (x0 // 4) + 48 * x1 + x0 % 4), xmask) tmp1 = tl.load(in_ptr1 + x2 % 4, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask) tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_add_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 0.0 tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp6 = tmp5 + tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 + tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 + tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tl_math.exp(tmp14) tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): 
xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_8(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, 
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) assert_size_stride(primals_3, (12,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (2048, 4), (4, 1)) assert_size_stride(primals_9, (2048,), (1,)) assert_size_stride(primals_10, (4, 2048), (2048, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](buf0, primals_3, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((16, 4, 1), (1, 16, 64), torch.float32) triton_poi_fused_div_1[grid(64)](buf0, primals_3, buf2, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_2[grid(64)](buf0, primals_3, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 del primals_3 buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf2, reinterpret_tensor(buf3, (16, 1, 4), (1, 0, 16), 0), out=buf4) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_add_3[grid(256)](buf4, buf5, 256, XBLOCK= 128, num_warps=4, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused__softmax_4[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf5 buf7 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf6, reinterpret_tensor(buf1, (16, 4, 1), (1, 16, 0), 0), out=buf7) buf8 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32) triton_poi_fused_clone_5[grid(4, 16)](buf7, buf8, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0) del buf7 extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9) del primals_5 buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_6[grid(16)](primals_1, buf9, buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_7[grid(64)](primals_1, buf9, buf10, buf11, primals_6, primals_7, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_7 buf13 = empty_strided_cuda((16, 2048), (2048, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 2048), (1, 4), 0), out=buf13) buf14 = reinterpret_tensor(buf13, (4, 4, 2048), (8192, 2048, 1), 0) del buf13 buf20 = empty_strided_cuda((4, 4, 2048), (8192, 2048, 1), torch.bool) triton_poi_fused_relu_threshold_backward_8[grid(32768)](buf14, primals_9, buf20, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 buf15 = empty_strided_cuda((16, 4), (4, 1), torch.float32) 
extern_kernels.mm(reinterpret_tensor(buf14, (16, 2048), (2048, 1), 0), reinterpret_tensor(primals_10, (2048, 4), (1, 2048), 0), out=buf15) buf16 = reinterpret_tensor(buf15, (4, 4, 4), (16, 4, 1), 0) del buf15 triton_poi_fused_add_9[grid(64)](buf16, buf12, primals_11, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_11 buf17 = buf11 del buf11 buf18 = buf10 del buf10 triton_poi_fused_native_layer_norm_10[grid(16)](buf16, buf17, buf18, 16, XBLOCK=16, num_warps=1, num_stages=1) buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_11[grid(64)](buf16, buf17, buf18, primals_12, primals_13, buf19, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf17 del buf18 del primals_13 return buf19, primals_1, primals_6, primals_12, buf6, reinterpret_tensor( buf8, (16, 4), (4, 1), 0), buf9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(buf14, (16, 2048), (2048, 1), 0 ), buf16, primals_10, buf20, primals_8, primals_4, reinterpret_tensor( buf1, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf2, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 1), 0) def _in_projection_packed(q: 'Tensor', k: 'Tensor', v: 'Tensor', w: 'Tensor', b: 'Optional[Tensor]'=None) ->List[Tensor]: E = q.size(-1) if k is v: if q is k: return F.linear(q, w, b).chunk(3, dim=-1) else: w_q, w_kv = w.split([E, E * 2]) if b is None: b_q = b_kv = None else: b_q, b_kv = b.split([E, E * 2]) return (F.linear(q, w_q, b_q),) + F.linear(k, w_kv, b_kv).chunk( 2, dim=-1) else: w_q, w_k, w_v = w.chunk(3) if b is None: b_q = b_k = b_v = None else: b_q, b_k, b_v = b.chunk(3) return F.linear(q, w_q, b_q), F.linear(k, w_k, b_k), F.linear(v, w_v, b_v) def scale_dot_attention(q: 'Tensor', k: 'Tensor', v: 'Tensor', dropout_p: 'float'=0.0, attn_mask: 'Optional[Tensor]'=None) ->Tuple[Tensor, Tensor]: _, _, E = q.shape q = q / math.sqrt(E) attn = torch.bmm(q, k.transpose(-2, -1)) if attn_mask is not None: attn = attn + attn_mask attn = F.softmax(attn, dim=-1) if dropout_p: attn = F.dropout(attn, p=dropout_p) out = torch.bmm(attn, v) return out, attn def multi_head_attention_forward(query: 'Tensor', key: 'Tensor', value: 'Tensor', num_heads: 'int', in_proj_weight: 'Tensor', in_proj_bias: 'Optional[Tensor]', dropout_p: 'float', out_proj_weight: 'Tensor', out_proj_bias: 'Optional[Tensor]', training: 'bool'=True, key_padding_mask: 'Optional[Tensor]'=None, need_weights: 'bool'=True, attn_mask: 'Optional[Tensor]'=None, use_separate_proj_weight=None, q_proj_weight: 'Optional[Tensor]'=None, k_proj_weight: 'Optional[Tensor]'=None, v_proj_weight: 'Optional[Tensor]'=None) ->Tuple[ Tensor, Optional[Tensor]]: tgt_len, bsz, embed_dim = query.shape src_len, _, _ = key.shape head_dim = embed_dim // num_heads q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias) if attn_mask is not None: if attn_mask.dtype == torch.uint8: warnings.warn( 'Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.' ) attn_mask = attn_mask else: assert attn_mask.is_floating_point( ) or attn_mask.dtype == torch.bool, f'Only float, byte, and bool types are supported for attn_mask, not {attn_mask.dtype}' if attn_mask.dim() == 2: correct_2d_size = tgt_len, src_len if attn_mask.shape != correct_2d_size: raise RuntimeError( f'The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}.' 
                    )
            attn_mask = attn_mask.unsqueeze(0)
        elif attn_mask.dim() == 3:
            correct_3d_size = bsz * num_heads, tgt_len, src_len
            if attn_mask.shape != correct_3d_size:
                raise RuntimeError(
                    f'The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}.'
                    )
        else:
            raise RuntimeError(
                f"attn_mask's dimension {attn_mask.dim()} is not supported")
    if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
        warnings.warn(
            'Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.'
            )
        key_padding_mask = key_padding_mask
    q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
    k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
    v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
    if key_padding_mask is not None:
        assert key_padding_mask.shape == (bsz, src_len
            ), f'expecting key_padding_mask shape of {bsz, src_len}, but got {key_padding_mask.shape}'
        key_padding_mask = key_padding_mask.view(bsz, 1, 1, src_len).expand(
            -1, num_heads, -1, -1).reshape(bsz * num_heads, 1, src_len)
        if attn_mask is None:
            attn_mask = key_padding_mask
        elif attn_mask.dtype == torch.bool:
            attn_mask = attn_mask.logical_or(key_padding_mask)
        else:
            attn_mask = attn_mask.masked_fill(key_padding_mask, float('-inf'))
    if attn_mask is not None and attn_mask.dtype == torch.bool:
        new_attn_mask = torch.zeros_like(attn_mask, dtype=torch.float)
        new_attn_mask.masked_fill_(attn_mask, float('-inf'))
        attn_mask = new_attn_mask
    if not training:
        dropout_p = 0.0
    # Keyword arguments here for the same reason as in the reference source:
    # positional (attn_mask, dropout_p) would swap the two parameters of
    # scale_dot_attention.
    attn_output, attn_output_weights = scale_dot_attention(q, k, v,
        dropout_p=dropout_p, attn_mask=attn_mask)
    attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len,
        bsz, embed_dim)
    attn_output = nn.functional.linear(attn_output, out_proj_weight,
        out_proj_bias)
    if need_weights:
        attn_output_weights = attn_output_weights.view(bsz, num_heads,
            tgt_len, src_len)
        return attn_output, attn_output_weights.sum(dim=1) / num_heads
    else:
        return attn_output, None


def positional_encoding(X, num_features, dropout_p=0.1, max_len=512) ->Tensor:
    # No-op placeholder: the Dropout module is built and immediately
    # discarded, and X is returned unchanged.
    nn.Dropout(dropout_p)
    return X


class MultiheadAttention(nn.Module):

    def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True, kdim=
        None, vdim=None, batch_first=False) ->None:
        super(MultiheadAttention, self).__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self._qkv_same_embed_dim = (self.kdim == self.embed_dim and self.
vdim == self.embed_dim) self.num_heads = num_heads self.dropout = dropout self.batch_first = batch_first self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' if self._qkv_same_embed_dim is False: self.q_proj_weight = Parameter(torch.empty((embed_dim, embed_dim))) self.k_proj_weight = Parameter(torch.empty((embed_dim, self.kdim))) self.v_proj_weight = Parameter(torch.empty((embed_dim, self.vdim))) self.register_parameter('in_proj_weight', None) else: self.in_proj_weight = Parameter(torch.empty((3 * embed_dim, embed_dim))) self.register_parameter('q_proj_weight', None) self.register_parameter('k_proj_weight', None) self.register_parameter('v_proj_weight', None) if bias: self.in_proj_bias = Parameter(torch.empty(3 * embed_dim)) else: self.register_parameter('in_proj_bias', None) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self._reset_parameters() def _reset_parameters(self): if self._qkv_same_embed_dim: xavier_uniform_(self.in_proj_weight) else: xavier_uniform_(self.q_proj_weight) xavier_uniform_(self.k_proj_weight) xavier_uniform_(self.v_proj_weight) if self.in_proj_bias is not None: constant_(self.in_proj_bias, 0.0) constant_(self.out_proj.bias, 0.0) def forward(self, query: 'Tensor', key: 'Tensor', value: 'Tensor', key_padding_mask: 'Optional[Tensor]'=None, need_weights: 'bool'= True, attn_mask: 'Optional[Tensor]'=None) ->Tuple[Tensor, Optional[ Tensor]]: if self.batch_first: query, key, value = [x.transpose(1, 0) for x in (query, key, value) ] if not self._qkv_same_embed_dim: attn_output, attn_output_weights = multi_head_attention_forward( query, key, value, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.dropout, self.out_proj.weight, self .out_proj.bias, training=self.training, key_padding_mask= key_padding_mask, need_weights=need_weights, attn_mask= attn_mask, use_separate_proj_weight=True, q_proj_weight= self.q_proj_weight, k_proj_weight=self.k_proj_weight, v_proj_weight=self.v_proj_weight) else: attn_output, attn_output_weights = multi_head_attention_forward( query, key, value, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.dropout, self.out_proj.weight, self .out_proj.bias, training=self.training, key_padding_mask= key_padding_mask, need_weights=need_weights, attn_mask= attn_mask) if self.batch_first: return attn_output.transpose(1, 0), attn_output_weights else: return attn_output, attn_output_weights class TransformerEncoderLayerNew(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=F.relu, layer_norm_eps=1e-05, batch_first=False) ->None: super(TransformerEncoderLayerNew, self).__init__() self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.activation = activation def forward(self, input_0): primals_2 = self.self_attn.in_proj_weight primals_3 = self.self_attn.in_proj_bias primals_4 = self.self_attn.out_proj.weight primals_5 = self.self_attn.out_proj.bias primals_8 = self.linear1.weight primals_9 = self.linear1.bias primals_10 = self.linear2.weight primals_6 = self.linear2.bias primals_7 = self.norm1.weight primals_11 = self.norm1.bias primals_12 = 
self.norm2.weight primals_13 = self.norm2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
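TransformerEncoderLayerNew keeps the original module interface but routes forward through the compiled call(...). Below is a sketch of an equivalence check, assuming a CUDA device and that both the eager TransformerEncoderLayer from the reference source and the class above are in one scope; with dropout disabled, the fused path and the eager path should agree to floating-point tolerance:

import torch

torch.manual_seed(0)
eager = TransformerEncoderLayer(d_model=4, nhead=4).cuda().eval()
fused = TransformerEncoderLayerNew(d_model=4, nhead=4).cuda().eval()
fused.load_state_dict(eager.state_dict())  # identical parameters on both paths
x = torch.rand(4, 4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(fused(x), eager(x), rtol=1e-4, atol=1e-5)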
Treedy2020/TransNet
TransformerEncoderLayer
false
18051
[ "MIT" ]
4
dd0e43e1931153baea4e5fe8cb31dc5ff0cb7b09
https://github.com/Treedy2020/TransNet/tree/dd0e43e1931153baea4e5fe8cb31dc5ff0cb7b09
BertSelfAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/z7/cz7gvwpm6kooqnwr4vixflp6q5wq6isg5on5czrzmd63imnnltlp.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {}) # %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/pa/cpadi56vkuxukr63kmqryvzbnuortdig4tyvdogzts7swfocorqd.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %primals_8), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {}) # %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {}) # %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {}) # %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%add_tensor, -inf), kwargs = {}) # %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {}) # %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {}) triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, 
eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = float("-inf") tmp27 = tmp2 == tmp26 tmp28 = tmp27 == 0 tmp29 = tmp28.to(tl.int64) tmp30 = (tmp29 != 0) tmp31 = tmp5 == tmp26 tmp32 = tmp31 == 0 tmp33 = tmp32.to(tl.int64) tmp34 = (tmp33 != 0) tmp35 = tmp30 | tmp34 tmp36 = tmp9 == tmp26 tmp37 = tmp36 == 0 tmp38 = tmp37.to(tl.int64) tmp39 = (tmp38 != 0) tmp40 = tmp35 | tmp39 tmp41 = tmp13 == tmp26 tmp42 = tmp41 == 0 tmp43 = tmp42.to(tl.int64) tmp44 = (tmp43 != 0) tmp45 = tmp40 | tmp44 tl.store(out_ptr0 + (x2), tmp14, xmask) tl.store(out_ptr1 + (x2), tmp25, xmask) tl.store(out_ptr2 + (x2), tmp45, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/gi/cgi6e7jgcuwbujn6r6natpglnb3qyo2zdrze4hhdmoiffdgmo3pd.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %primals_8), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {}) # %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {}) # %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {}) # %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {}) triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, 
major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = (xindex // 4) x4 = xindex x5 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last').to(tl.int1) tmp2 = tl.load(in_out_ptr0 + (x4), xmask) tmp3 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr3 + (x3), xmask, eviction_policy='evict_last') tmp1 = tmp0 == 0 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tmp10 = 0.0 tmp11 = tl.where(tmp1, tmp10, tmp9) tl.store(in_out_ptr0 + (x4), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ej/cejxfbejrrsaghh6aun6yxkqmy4riwh54nh4ajhql72uuev27cjd.py # Topologically Sorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: # Graph fragment: # %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/5k/c5kufnc7mciff7by75wm2btl7xamphqljghinmvgmksxfleox4tp.py # Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # context_layer_1 => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, 
(4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(buf0, primals_2, buf3, 16, 4, grid=grid(16, 4), stream=stream0) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool) # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(buf5, primals_8, buf6, buf7, buf8, 64, grid=grid(64), stream=stream0) buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(buf9, buf8, primals_8, buf6, buf7, 256, grid=grid(256), stream=stream0) del buf8 del primals_8 buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(buf2, primals_7, buf10, 16, 4, grid=grid(16, 4), stream=stream0) del primals_7 buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11) buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0) del buf11 return (reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from 
torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config
import math
import torch
from torch import nn


class BertSelfAttention(nn.Module):

    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
                % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads,
            self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        attention_scores = attention_scores + attention_mask
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        attention_probs = self.dropout(attention_probs)
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        return context_layer


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'config': _mock_config(hidden_size=4,
        num_attention_heads=4, attention_probs_dropout_prob=0.5)}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
    tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)


@triton.jit
def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16
    tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = triton_helpers.maximum(tmp2, tmp5)
    tmp9 = tmp7 + tmp8
    tmp10 = triton_helpers.maximum(tmp6, tmp9)
    tmp13 = tmp11 + tmp12
    tmp14 = triton_helpers.maximum(tmp10, tmp13)
    tmp15 = tmp2 - tmp14
    tmp16 = tl_math.exp(tmp15)
    tmp17 = tmp5 - tmp14
    tmp18 = tl_math.exp(tmp17)
    tmp19 = tmp16 + tmp18
    tmp20 = tmp9 - tmp14
    tmp21 = tl_math.exp(tmp20)
    tmp22 = tmp19 + tmp21
    tmp23 = tmp13 - tmp14
    tmp24 = tl_math.exp(tmp23)
    tmp25 = tmp22 + tmp24
    tmp26 = float('-inf')
    tmp27 = tmp2 == tmp26
    tmp28 = tmp27 == 0
    tmp29 = tmp28.to(tl.int64)
    tmp30 = tmp29 != 0
    tmp31 = tmp5 == tmp26
    tmp32 = tmp31 == 0
    tmp33 = tmp32.to(tl.int64)
    tmp34 = tmp33 != 0
    tmp35 = tmp30 | tmp34
    tmp36 = tmp9 == tmp26
    tmp37 = tmp36 == 0
    tmp38 = tmp37.to(tl.int64)
    tmp39 = tmp38 != 0
    tmp40 = tmp35 | tmp39
    tmp41 = tmp13 == tmp26
    tmp42 = tmp41 == 0
    tmp43 = tmp42.to(tl.int64)
    tmp44 = tmp43 != 0
    tmp45 = tmp40 | tmp44
    tl.store(out_ptr0 + x2, tmp14, xmask)
    tl.store(out_ptr1 + x2, tmp25, xmask)
    tl.store(out_ptr2 + x2, tmp45, xmask)


@triton.jit
def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex // 4
    x4 = xindex
    x5 = xindex % 64
    tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last').to(tl.int1)
    tmp2 = tl.load(in_out_ptr0 + x4, xmask)
    tmp3 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last')
    tmp1 = tmp0 == 0
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 - tmp5
    tmp7 = tl_math.exp(tmp6)
    tmp9 = tmp7 / tmp8
    tmp10 = 0.0
    tmp11 = tl.where(tmp1, tmp10, tmp9)
    tl.store(in_out_ptr0 + x4, tmp11, xmask)


@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK:
    tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)


@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
    tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
        del primals_4
        buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
        del primals_6
        buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
            XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
        del primals_2
        buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
        del buf0
        triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
            XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
        del primals_5
        buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0),
            reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
        buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
        del buf1
        buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool)
        triton_poi_fused_1[grid(64)](buf5, primals_8, buf6, buf7, buf8, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf5
        triton_poi_fused_2[grid(256)](buf9, buf8, primals_8, buf6, buf7,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf8
        del primals_8
        buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf7
        triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf10, 16, 4,
            XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
        del primals_7
        buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
        del buf2
        extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0),
            reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
        buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf6
        triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4,
            XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
        del buf11
    return (reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0),
        reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf9,
        reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0))


class BertSelfAttentionNew(nn.Module):

    def __init__(self, config):
        super(BertSelfAttentionNew, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
                % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads,
            self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, input_0, input_1):
        primals_1 = self.query.weight
        primals_2 = self.query.bias
        primals_4 = self.key.weight
        primals_5 = self.key.bias
        primals_6 = self.value.weight
        primals_7 = self.value.bias
        primals_3 = input_0
        primals_8 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8])
        return output[0]
Ahren09/FinerFact
BertSelfAttention
false
18052
[ "MIT" ]
9
68df3799fbfadd56fa69b019ca6fba0c482f21d3
https://github.com/Ahren09/FinerFact/tree/68df3799fbfadd56fa69b019ca6fba0c482f21d3
TransformerDecoderLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/ao/caohakair5wxirtyoqa77ignbks5astbylffylea37sbctqafycj.py # Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_2 => clone_2 # Graph fragment: # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%getitem_2,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (8 + x0 + (12*x1)), xmask) tmp1 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') 
tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/35/c35bozqtoagsjyq4q26yxatyisz7ai5h2jopctndu5cellhkrgrk.py # Topologically Sorted Source Nodes: [q_2], Original ATen: [aten.div] # Source node to ATen node mapping: # q_2 => div # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_1, 1.0), kwargs = {}) triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + ((12*(x0 // 4)) + (48*x1) + (x0 % 4)), xmask) tmp1 = tl.load(in_ptr1 + (x2 % 4), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/5b/c5blzob5s6pwc6r2g3sgarsune3okwcbcukrrjv4nibbjaoxrmbp.py # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_1 => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%getitem_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + (12*x1)), xmask) tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/jz/cjzbdxtrpgwr7evumvox4ybrvnpkv76nymg6kalovwfexz4al2ec.py # Topologically Sorted Source Nodes: [attn_1, attn_2], Original ATen: [aten.add, aten._softmax] # Source node to ATen node mapping: # attn_1 => add # attn_2 => amax, exp, sub # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%bmm, 0.0), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_add_3 = async_compile.triton('triton_poi_fused__softmax_add_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_3(in_ptr0, out_ptr0, xnumel, XBLOCK : 
tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp6 = tmp5 + tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 + tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 + tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tl_math.exp(tmp14) tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/tp/ctpsln2dva6cest7g4ejturc47z2eqojdn7vngubqnblavq4jihj.py # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_2 => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_2/inductor_cache/53/c53s7jioa243frefufoxqblcdbhb7rnhhkzrnsx4vvlhishas7iw.py # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_3 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_5,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/eq/ceqkt22etynsgdiire3npctu6ae2jyxstmwvc6an3vghfqql2yu2.py # Topologically Sorted Source Nodes: [tgt, tgt_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # tgt => add_1 # tgt_1 => var_mean # Graph fragment: # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_5, %view_7), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_1, [2]), kwargs = {correction: 0, keepdim: True}) triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], 
filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/6z/c6zahf5ye65saqqmps3f6q6adeffviegsxonkuv73ylppfyxij3s.py # Topologically Sorted Source Nodes: [tgt, tgt_1], Original ATen: [aten.add, aten.native_layer_norm] # Source node to ATen node mapping: # tgt => add_1 # tgt_1 => add_2, add_3, mul, mul_1, rsqrt, sub_1 # Graph fragment: # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_5, %view_7), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_3, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %getitem_4), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_6), kwargs = {}) # %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_7), kwargs = {}) 
triton_poi_fused_add_native_layer_norm_7 = async_compile.triton('triton_poi_fused_add_native_layer_norm_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ng/cng7oda4jktntfg2bck7nd3mwc6v6oisiznozdhw6mxwwzetmicm.py # Topologically Sorted Source Nodes: [contiguous_6], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_6 => clone_6 # Graph fragment: # %clone_6 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%getitem_10,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_8 = async_compile.triton('triton_poi_fused_clone_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + (8*x1)), xmask) tmp1 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/2w/c2wleuslm4kkfxybhuavekivymcqhsap3mr5hys4qigwprk3s3vr.py # Topologically Sorted Source Nodes: [q_5], Original ATen: [aten.div] # Source node to ATen node mapping: # q_5 => div_3 # Graph fragment: # %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_9, 1.0), kwargs = {}) triton_poi_fused_div_9 = async_compile.triton('triton_poi_fused_div_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x2 % 4), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/2q/c2qqduvsqqpt5c4tq5sndtrvgyom2zostn5cyxncb74mjslg4kul.py # Topologically Sorted Source Nodes: [contiguous_5], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous_5 => clone_5 # Graph fragment: # %clone_5 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%getitem_9,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_10 = async_compile.triton('triton_poi_fused_clone_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_10(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (8*x1)), xmask) tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/xu/cxuo3ylge4wnwyjdzyeca5ijduhctvd5pc5wr6sqrlhf5jhgp565.py # Topologically Sorted Source Nodes: [tgt_2], Original ATen: [aten.add] # Source node to ATen node mapping: # tgt_2 => add_5 # Graph fragment: # %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %view_18), kwargs = {}) triton_poi_fused_add_11 = async_compile.triton('triton_poi_fused_add_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 
2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_out_ptr0 + (x2), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/os/cos3zldxhcfhtg3y6fdgfn2wle22zj4ucqsim5zksq4oc7slqonb.py # Topologically Sorted Source Nodes: [tgt_3], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # tgt_3 => add_6, rsqrt_1, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_5, [2]), kwargs = {correction: 0, keepdim: True}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_11, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_6,), kwargs = {}) triton_poi_fused_native_layer_norm_12 = async_compile.triton('triton_poi_fused_native_layer_norm_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_12(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask 
= xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/4n/c4nmwqylzeupz45jpkruywenan7mftgg5xgg46obnpafdfgbwotw.py # Topologically Sorted Source Nodes: [tgt_3], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # tgt_3 => add_6, add_7, mul_2, mul_3, rsqrt_1, sub_3, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_5, [2]), kwargs = {correction: 0, keepdim: True}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_11, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_6,), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %getitem_12), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %rsqrt_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_13), kwargs = {}) # %add_7 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_14), kwargs = {}) triton_poi_fused_native_layer_norm_13 = async_compile.triton('triton_poi_fused_native_layer_norm_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused_native_layer_norm_13(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/my/cmysg3o7utvsnt57racehqiif24hyrgwh2p3cstddyrhrjuesodp.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_21,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_14 = async_compile.triton('triton_poi_fused_relu_threshold_backward_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_14(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32768 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, 
primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20 = args args.clear() assert_size_stride(primals_1, (12, 4), (4, 1)) assert_size_stride(primals_2, (12, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (12, 4), (4, 1)) assert_size_stride(primals_9, (12, ), (1, )) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_13, (4, ), (1, )) assert_size_stride(primals_14, (4, ), (1, )) assert_size_stride(primals_15, (2048, 4), (4, 1)) assert_size_stride(primals_16, (2048, ), (1, )) assert_size_stride(primals_17, (4, 2048), (2048, 1)) assert_size_stride(primals_18, (4, ), (1, )) assert_size_stride(primals_19, (4, ), (1, )) assert_size_stride(primals_20, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_5, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_2], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf0, primals_2, buf1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((16, 4, 1), (1, 16, 64), torch.float32) # Topologically Sorted Source Nodes: [q_2], Original ATen: [aten.div] triton_poi_fused_div_1.run(buf0, primals_2, buf2, 64, grid=grid(64), stream=stream0) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf0, primals_2, buf3, 64, grid=grid(64), stream=stream0) del buf0 del primals_2 buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm] extern_kernels.bmm(buf2, reinterpret_tensor(buf3, (16, 1, 4), (1, 0, 16), 0), out=buf4) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_1, attn_2], Original ATen: [aten.add, aten._softmax] triton_poi_fused__softmax_add_3.run(buf4, buf5, 256, grid=grid(256), stream=stream0) buf6 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf5, buf6, 256, grid=grid(256), stream=stream0) buf7 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm] extern_kernels.bmm(buf6, reinterpret_tensor(buf1, (16, 4, 1), (1, 16, 0), 0), out=buf7) buf8 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone] triton_poi_fused_clone_5.run(buf7, buf8, 4, 16, grid=grid(4, 16), stream=stream0) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [attn_output_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_4, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 
0), alpha=1, beta=1, out=buf9) del primals_4 buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [tgt, tgt_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_6.run(primals_5, buf9, buf10, buf11, 16, grid=grid(16), stream=stream0) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tgt, tgt_1], Original ATen: [aten.add, aten.native_layer_norm] triton_poi_fused_add_native_layer_norm_7.run(primals_5, buf9, buf10, buf11, primals_6, primals_7, buf12, 64, grid=grid(64), stream=stream0) del primals_7 buf13 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf13) buf14 = empty_strided_cuda((16, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 8), (1, 4), 16), out=buf14) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_6], Original ATen: [aten.clone] triton_poi_fused_clone_8.run(buf14, primals_9, buf15, 64, grid=grid(64), stream=stream0) buf16 = reinterpret_tensor(buf13, (16, 4, 1), (1, 16, 64), 0); del buf13 # reuse # Topologically Sorted Source Nodes: [q_5], Original ATen: [aten.div] triton_poi_fused_div_9.run(buf16, primals_9, 64, grid=grid(64), stream=stream0) buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_5], Original ATen: [aten.clone] triton_poi_fused_clone_10.run(buf14, primals_9, buf17, 64, grid=grid(64), stream=stream0) del buf14 del primals_9 buf18 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [attn_3], Original ATen: [aten.bmm] extern_kernels.bmm(buf16, reinterpret_tensor(buf17, (16, 1, 4), (1, 0, 16), 0), out=buf18) buf19 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_4, attn_5], Original ATen: [aten.add, aten._softmax] triton_poi_fused__softmax_add_3.run(buf18, buf19, 256, grid=grid(256), stream=stream0) buf20 = buf18; del buf18 # reuse # Topologically Sorted Source Nodes: [attn_5], Original ATen: [aten._softmax] triton_poi_fused__softmax_4.run(buf19, buf20, 256, grid=grid(256), stream=stream0) del buf19 buf21 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.bmm] extern_kernels.bmm(buf20, reinterpret_tensor(buf15, (16, 4, 1), (1, 16, 0), 0), out=buf21) buf22 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous_7], Original ATen: [aten.clone] triton_poi_fused_clone_5.run(buf21, buf22, 4, 16, grid=grid(4, 16), stream=stream0) buf23 = reinterpret_tensor(buf21, (16, 4), (4, 1), 0); del buf21 # reuse # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf22, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf23) buf24 = reinterpret_tensor(buf23, (4, 4, 4), (16, 4, 1), 0); del buf23 # reuse # Topologically Sorted Source Nodes: [tgt_2], Original ATen: [aten.add] triton_poi_fused_add_11.run(buf24, buf12, primals_11, 64, grid=grid(64), stream=stream0) del primals_11 buf25 
= buf11; del buf11 # reuse buf26 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [tgt_3], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_12.run(buf24, buf25, buf26, 16, grid=grid(16), stream=stream0) buf27 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tgt_3], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_13.run(buf24, buf25, buf26, primals_13, primals_14, buf27, 64, grid=grid(64), stream=stream0) del primals_14 buf28 = empty_strided_cuda((16, 2048), (2048, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf27, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 2048), (1, 4), 0), out=buf28) buf29 = reinterpret_tensor(buf28, (4, 4, 2048), (8192, 2048, 1), 0); del buf28 # reuse buf35 = empty_strided_cuda((4, 4, 2048), (8192, 2048, 1), torch.bool) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_14.run(buf29, primals_16, buf35, 32768, grid=grid(32768), stream=stream0) del primals_16 buf30 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf29, (16, 2048), (2048, 1), 0), reinterpret_tensor(primals_17, (2048, 4), (1, 2048), 0), out=buf30) buf31 = reinterpret_tensor(buf30, (4, 4, 4), (16, 4, 1), 0); del buf30 # reuse # Topologically Sorted Source Nodes: [tgt_4], Original ATen: [aten.add] triton_poi_fused_add_11.run(buf31, buf27, primals_18, 64, grid=grid(64), stream=stream0) del primals_18 buf32 = buf26; del buf26 # reuse buf33 = buf25; del buf25 # reuse # Topologically Sorted Source Nodes: [tgt_5], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_12.run(buf31, buf32, buf33, 16, grid=grid(16), stream=stream0) buf34 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tgt_5], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_13.run(buf31, buf32, buf33, primals_19, primals_20, buf34, 64, grid=grid(64), stream=stream0) del buf32 del buf33 del primals_20 return (buf34, primals_5, primals_6, primals_13, primals_19, buf6, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), buf9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_12, (16, 4), (4, 1), 0), buf20, reinterpret_tensor(buf22, (16, 4), (4, 1), 0), buf24, reinterpret_tensor(buf27, (16, 4), (4, 1), 0), reinterpret_tensor(buf29, (16, 2048), (2048, 1), 0), buf31, primals_17, buf35, primals_15, primals_10, reinterpret_tensor(buf15, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf16, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf17, (16, 4, 1), (1, 16, 1), 0), reinterpret_tensor(primals_8, (4, 4), (4, 1), 0), primals_3, reinterpret_tensor(buf1, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf2, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = 
rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
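# Hedged reference sketch (illustrative, not emitted by the compiler; the
# helper name layer_norm_stats_reference is hypothetical). The fused kernel
# triton_poi_fused_native_layer_norm_12 above computes, for each length-4
# row, the mean and rsqrt(var + 1e-05) that LayerNorm consumes; the eager
# equivalent below reproduces both outputs via torch.var_mean.
import torch

def layer_norm_stats_reference(x: torch.Tensor, eps: float = 1e-05):
    # Returns (mean, rstd) over the last dim, matching out_ptr0 / out_ptr1
    # of the fused kernel (variance is the biased, correction=0 estimate).
    var, mean = torch.var_mean(x, dim=-1, unbiased=False, keepdim=True)
    return mean, torch.rsqrt(var + eps)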
import math import torch import warnings from torch import Tensor import torch.nn as nn from torch.nn.parameter import Parameter from torch.nn.init import xavier_uniform_ from torch.nn.init import constant_ import torch.nn.functional as F from typing import Optional from typing import Tuple from typing import List def _in_projection_packed(q: 'Tensor', k: 'Tensor', v: 'Tensor', w: 'Tensor', b: 'Optional[Tensor]'=None) ->List[Tensor]: E = q.size(-1) if k is v: if q is k: return F.linear(q, w, b).chunk(3, dim=-1) else: w_q, w_kv = w.split([E, E * 2]) if b is None: b_q = b_kv = None else: b_q, b_kv = b.split([E, E * 2]) return (F.linear(q, w_q, b_q),) + F.linear(k, w_kv, b_kv).chunk( 2, dim=-1) else: w_q, w_k, w_v = w.chunk(3) if b is None: b_q = b_k = b_v = None else: b_q, b_k, b_v = b.chunk(3) return F.linear(q, w_q, b_q), F.linear(k, w_k, b_k), F.linear(v, w_v, b_v) def scale_dot_attention(q: 'Tensor', k: 'Tensor', v: 'Tensor', dropout_p: 'float'=0.0, attn_mask: 'Optional[Tensor]'=None) ->Tuple[Tensor, Tensor]: _, _, E = q.shape q = q / math.sqrt(E) attn = torch.bmm(q, k.transpose(-2, -1)) if attn_mask is not None: attn = attn + attn_mask attn = F.softmax(attn, dim=-1) if dropout_p: attn = F.dropout(attn, p=dropout_p) out = torch.bmm(attn, v) return out, attn def multi_head_attention_forward(query: 'Tensor', key: 'Tensor', value: 'Tensor', num_heads: 'int', in_proj_weight: 'Tensor', in_proj_bias: 'Optional[Tensor]', dropout_p: 'float', out_proj_weight: 'Tensor', out_proj_bias: 'Optional[Tensor]', training: 'bool'=True, key_padding_mask: 'Optional[Tensor]'=None, need_weights: 'bool'=True, attn_mask: 'Optional[Tensor]'=None, use_separate_proj_weight=None, q_proj_weight: 'Optional[Tensor]'=None, k_proj_weight: 'Optional[Tensor]'=None, v_proj_weight: 'Optional[Tensor]'=None) ->Tuple[ Tensor, Optional[Tensor]]: tgt_len, bsz, embed_dim = query.shape src_len, _, _ = key.shape head_dim = embed_dim // num_heads q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias) if attn_mask is not None: if attn_mask.dtype == torch.uint8: warnings.warn( 'Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.' ) attn_mask = attn_mask else: assert attn_mask.is_floating_point( ) or attn_mask.dtype == torch.bool, f'Only float, byte, and bool types are supported for attn_mask, not {attn_mask.dtype}' if attn_mask.dim() == 2: correct_2d_size = tgt_len, src_len if attn_mask.shape != correct_2d_size: raise RuntimeError( f'The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}.' ) attn_mask = attn_mask.unsqueeze(0) elif attn_mask.dim() == 3: correct_3d_size = bsz * num_heads, tgt_len, src_len if attn_mask.shape != correct_3d_size: raise RuntimeError( f'The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}.' ) else: raise RuntimeError( f"attn_mask's dimension {attn_mask.dim()} is not supported") if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8: warnings.warn( 'Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.' 
) key_padding_mask = key_padding_mask q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1) k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) if key_padding_mask is not None: assert key_padding_mask.shape == (bsz, src_len ), f'expecting key_padding_mask shape of {bsz, src_len}, but got {key_padding_mask.shape}' key_padding_mask = key_padding_mask.view(bsz, 1, 1, src_len).expand( -1, num_heads, -1, -1).reshape(bsz * num_heads, 1, src_len) if attn_mask is None: attn_mask = key_padding_mask elif attn_mask.dtype == torch.bool: attn_mask = attn_mask.logical_or(key_padding_mask) else: attn_mask = attn_mask.masked_fill(key_padding_mask, float('-inf')) if attn_mask is not None and attn_mask.dtype == torch.bool: new_attn_mask = torch.zeros_like(attn_mask, dtype=torch.float) new_attn_mask.masked_fill_(attn_mask, float('-inf')) attn_mask = new_attn_mask if not training: dropout_p = 0.0 attn_output, attn_output_weights = scale_dot_attention(q, k, v, dropout_p=dropout_p, attn_mask=attn_mask) attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn_output = nn.functional.linear(attn_output, out_proj_weight, out_proj_bias) if need_weights: attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) return attn_output, attn_output_weights.sum(dim=1) / num_heads else: return attn_output, None class MultiheadAttention(nn.Module): def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True, kdim= None, vdim=None, batch_first=False) ->None: super(MultiheadAttention, self).__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self._qkv_same_embed_dim = (self.kdim == self.embed_dim and self.
vdim == self.embed_dim) self.num_heads = num_heads self.dropout = dropout self.batch_first = batch_first self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' if self._qkv_same_embed_dim is False: self.q_proj_weight = Parameter(torch.empty((embed_dim, embed_dim))) self.k_proj_weight = Parameter(torch.empty((embed_dim, self.kdim))) self.v_proj_weight = Parameter(torch.empty((embed_dim, self.vdim))) self.register_parameter('in_proj_weight', None) else: self.in_proj_weight = Parameter(torch.empty((3 * embed_dim, embed_dim))) self.register_parameter('q_proj_weight', None) self.register_parameter('k_proj_weight', None) self.register_parameter('v_proj_weight', None) if bias: self.in_proj_bias = Parameter(torch.empty(3 * embed_dim)) else: self.register_parameter('in_proj_bias', None) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self._reset_parameters() def _reset_parameters(self): if self._qkv_same_embed_dim: xavier_uniform_(self.in_proj_weight) else: xavier_uniform_(self.q_proj_weight) xavier_uniform_(self.k_proj_weight) xavier_uniform_(self.v_proj_weight) if self.in_proj_bias is not None: constant_(self.in_proj_bias, 0.0) constant_(self.out_proj.bias, 0.0) def forward(self, query: 'Tensor', key: 'Tensor', value: 'Tensor', key_padding_mask: 'Optional[Tensor]'=None, need_weights: 'bool'= True, attn_mask: 'Optional[Tensor]'=None) ->Tuple[Tensor, Optional[ Tensor]]: if self.batch_first: query, key, value = [x.transpose(1, 0) for x in (query, key, value) ] if not self._qkv_same_embed_dim: attn_output, attn_output_weights = multi_head_attention_forward( query, key, value, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.dropout, self.out_proj.weight, self .out_proj.bias, training=self.training, key_padding_mask= key_padding_mask, need_weights=need_weights, attn_mask= attn_mask, use_separate_proj_weight=True, q_proj_weight= self.q_proj_weight, k_proj_weight=self.k_proj_weight, v_proj_weight=self.v_proj_weight) else: attn_output, attn_output_weights = multi_head_attention_forward( query, key, value, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.dropout, self.out_proj.weight, self .out_proj.bias, training=self.training, key_padding_mask= key_padding_mask, need_weights=need_weights, attn_mask= attn_mask) if self.batch_first: return attn_output.transpose(1, 0), attn_output_weights else: return attn_output, attn_output_weights class TransformerDecoderLayer(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=F.relu, layer_norm_eps=1e-05, batch_first=False) ->None: super(TransformerDecoderLayer, self).__init__() self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first) self.multihead_attn = MultiheadAttention(d_model, nhead, dropout= dropout, batch_first=batch_first) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.norm3 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.dropout3 = nn.Dropout(dropout) self.activation = activation def forward(self, tgt: 'Tensor', memory: 'Tensor', tgt_mask: 'Optional[Tensor]'=None, memory_mask: 'Optional[Tensor]'=None, tgt_key_padding_mask: 'Optional[Tensor]'=None, memory_key_padding_mask: 'Optional[Tensor]'=None) 
->Tensor: tgt2 = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0] tgt = tgt + self.dropout1(tgt2) tgt = self.norm1(tgt) tgt2 = self.multihead_attn(tgt, memory, memory, attn_mask= memory_mask, key_padding_mask=memory_key_padding_mask)[0] tgt = tgt + self.dropout2(tgt2) tgt = self.norm2(tgt) tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) tgt = tgt + self.dropout3(tgt2) tgt = self.norm3(tgt) return tgt def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'nhead': 4}]
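# Hedged usage sketch (the get_init_inputs/get_inputs pair is this dataset's
# harness convention; the _smoke_test wrapper itself is hypothetical). It
# builds the eager layer in eval mode, so dropout is inactive, and runs one
# decoder step on the random sample batch.
def _smoke_test():
    init_args, init_kwargs = get_init_inputs()
    layer = TransformerDecoderLayer(*init_args, **init_kwargs).eval()
    tgt, memory = get_inputs()
    out = layer(tgt, memory)
    assert out.shape == (4, 4, 4)
    return out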
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import warnings from torch import Tensor import torch.nn as nn from torch.nn.parameter import Parameter from torch.nn.init import xavier_uniform_ from torch.nn.init import constant_ import torch.nn.functional as F from typing import Optional from typing import Tuple from typing import List assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (8 + x0 + 12 * x1), xmask) tmp1 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (12 * (x0 // 4) + 48 * x1 + x0 % 4), xmask) tmp1 = tl.load(in_ptr1 + x2 % 4, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + 12 * x1), xmask) tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_add_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 0.0 tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp6 = tmp5 + tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 + tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 + tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tl_math.exp(tmp14) tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): 
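    # This kernel fuses the first residual connection with norm1's affine
    # normalization: per element it recomputes x = in_ptr0 + in_ptr1
    # (tgt + attn_output), then applies (x - mean) * rsqrt(var + 1e-05) *
    # weight + bias, reading the per-row mean (in_ptr2) and variance
    # (in_ptr3) produced by triton_poi_fused_add_native_layer_norm_6 above.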
xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) @triton.jit def triton_poi_fused_clone_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask) tmp1 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_div_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x2 % 4, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_clone_10(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask) tmp1 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_native_layer_norm_12(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_native_layer_norm_13(in_ptr0, in_ptr1, 
in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_14(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 2048 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20) = args args.clear() assert_size_stride(primals_1, (12, 4), (4, 1)) assert_size_stride(primals_2, (12,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (12, 4), (4, 1)) assert_size_stride(primals_9, (12,), (1,)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (2048, 4), (4, 1)) assert_size_stride(primals_16, (2048,), (1,)) assert_size_stride(primals_17, (4, 2048), (2048, 1)) assert_size_stride(primals_18, (4,), (1,)) assert_size_stride(primals_19, (4,), (1,)) assert_size_stride(primals_20, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_5, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](buf0, primals_2, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((16, 4, 1), (1, 16, 64), torch.float32) triton_poi_fused_div_1[grid(64)](buf0, primals_2, buf2, 64, XBLOCK= 64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_2[grid(64)](buf0, primals_2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 del primals_2 buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf2, reinterpret_tensor(buf3, (16, 1, 4), (1, 0, 16), 0), out=buf4) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_add_3[grid(256)](buf4, buf5, 256, XBLOCK= 128, num_warps=4, num_stages=1) buf6 = buf4 del buf4 
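        # Inductor's memory planner reuses the storage of buf4 (the raw
        # attention logits) as buf6, the destination of the softmax
        # normalization pass over buf5; the del merely drops the stale
        # Python alias so no extra allocation is needed.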
triton_poi_fused__softmax_4[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf6, reinterpret_tensor(buf1, (16, 4, 1), (1, 16, 0), 0), out=buf7) buf8 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32) triton_poi_fused_clone_5[grid(4, 16)](buf7, buf8, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0) del buf7 extern_kernels.addmm(primals_4, reinterpret_tensor(buf8, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9) del primals_4 buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_6[grid(16)](primals_5, buf9, buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_7[grid(64)](primals_5, buf9, buf10, buf11, primals_6, primals_7, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_7 buf13 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf13) buf14 = empty_strided_cuda((16, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_12, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_8, (4, 8), (1, 4), 16), out=buf14) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_8[grid(64)](buf14, primals_9, buf15, 64, XBLOCK=64, num_warps=1, num_stages=1) buf16 = reinterpret_tensor(buf13, (16, 4, 1), (1, 16, 64), 0) del buf13 triton_poi_fused_div_9[grid(64)](buf16, primals_9, 64, XBLOCK=64, num_warps=1, num_stages=1) buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_10[grid(64)](buf14, primals_9, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf14 del primals_9 buf18 = buf5 del buf5 extern_kernels.bmm(buf16, reinterpret_tensor(buf17, (16, 1, 4), (1, 0, 16), 0), out=buf18) buf19 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_add_3[grid(256)](buf18, buf19, 256, XBLOCK=128, num_warps=4, num_stages=1) buf20 = buf18 del buf18 triton_poi_fused__softmax_4[grid(256)](buf19, buf20, 256, XBLOCK= 128, num_warps=4, num_stages=1) del buf19 buf21 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(buf20, reinterpret_tensor(buf15, (16, 4, 1), (1, 16, 0), 0), out=buf21) buf22 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32) triton_poi_fused_clone_5[grid(4, 16)](buf21, buf22, 4, 16, XBLOCK= 16, YBLOCK=4, num_warps=1, num_stages=1) buf23 = reinterpret_tensor(buf21, (16, 4), (4, 1), 0) del buf21 extern_kernels.mm(reinterpret_tensor(buf22, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf23) buf24 = reinterpret_tensor(buf23, (4, 4, 4), (16, 4, 1), 0) del buf23 triton_poi_fused_add_11[grid(64)](buf24, buf12, primals_11, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_11 buf25 = buf11 del buf11 buf26 = buf10 del buf10 triton_poi_fused_native_layer_norm_12[grid(16)](buf24, buf25, buf26, 16, XBLOCK=16, num_warps=1, num_stages=1) buf27 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_13[grid(64)](buf24, buf25, buf26, primals_13, primals_14, buf27, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_14 buf28 = 
empty_strided_cuda((16, 2048), (2048, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf27, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 2048), (1, 4), 0), out=buf28) buf29 = reinterpret_tensor(buf28, (4, 4, 2048), (8192, 2048, 1), 0) del buf28 buf35 = empty_strided_cuda((4, 4, 2048), (8192, 2048, 1), torch.bool) triton_poi_fused_relu_threshold_backward_14[grid(32768)](buf29, primals_16, buf35, 32768, XBLOCK=128, num_warps=4, num_stages=1) del primals_16 buf30 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf29, (16, 2048), (2048, 1), 0), reinterpret_tensor(primals_17, (2048, 4), (1, 2048), 0), out=buf30) buf31 = reinterpret_tensor(buf30, (4, 4, 4), (16, 4, 1), 0) del buf30 triton_poi_fused_add_11[grid(64)](buf31, buf27, primals_18, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_18 buf32 = buf26 del buf26 buf33 = buf25 del buf25 triton_poi_fused_native_layer_norm_12[grid(16)](buf31, buf32, buf33, 16, XBLOCK=16, num_warps=1, num_stages=1) buf34 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_13[grid(64)](buf31, buf32, buf33, primals_19, primals_20, buf34, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf32 del buf33 del primals_20 return (buf34, primals_5, primals_6, primals_13, primals_19, buf6, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), buf9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor( primals_12, (16, 4), (4, 1), 0), buf20, reinterpret_tensor(buf22, ( 16, 4), (4, 1), 0), buf24, reinterpret_tensor(buf27, (16, 4), (4, 1 ), 0), reinterpret_tensor(buf29, (16, 2048), (2048, 1), 0), buf31, primals_17, buf35, primals_15, primals_10, reinterpret_tensor(buf15, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf16, (16, 1, 4), ( 1, 1, 16), 0), reinterpret_tensor(buf17, (16, 4, 1), (1, 16, 1), 0), reinterpret_tensor(primals_8, (4, 4), (4, 1), 0), primals_3, reinterpret_tensor(buf1, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf2, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 1), 0)) def _in_projection_packed(q: 'Tensor', k: 'Tensor', v: 'Tensor', w: 'Tensor', b: 'Optional[Tensor]'=None) ->List[Tensor]: E = q.size(-1) if k is v: if q is k: return F.linear(q, w, b).chunk(3, dim=-1) else: w_q, w_kv = w.split([E, E * 2]) if b is None: b_q = b_kv = None else: b_q, b_kv = b.split([E, E * 2]) return (F.linear(q, w_q, b_q),) + F.linear(k, w_kv, b_kv).chunk( 2, dim=-1) else: w_q, w_k, w_v = w.chunk(3) if b is None: b_q = b_k = b_v = None else: b_q, b_k, b_v = b.chunk(3) return F.linear(q, w_q, b_q), F.linear(k, w_k, b_k), F.linear(v, w_v, b_v) def scale_dot_attention(q: 'Tensor', k: 'Tensor', v: 'Tensor', dropout_p: 'float'=0.0, attn_mask: 'Optional[Tensor]'=None) ->Tuple[Tensor, Tensor]: _, _, E = q.shape q = q / math.sqrt(E) attn = torch.bmm(q, k.transpose(-2, -1)) if attn_mask is not None: attn = attn + attn_mask attn = F.softmax(attn, dim=-1) if dropout_p: attn = F.dropout(attn, p=dropout_p) out = torch.bmm(attn, v) return out, attn def multi_head_attention_forward(query: 'Tensor', key: 'Tensor', value: 'Tensor', num_heads: 'int', in_proj_weight: 'Tensor', in_proj_bias: 'Optional[Tensor]', dropout_p: 'float', out_proj_weight: 'Tensor', out_proj_bias: 'Optional[Tensor]', training: 'bool'=True, key_padding_mask: 'Optional[Tensor]'=None, need_weights: 'bool'=True, attn_mask: 'Optional[Tensor]'=None, use_separate_proj_weight=None, q_proj_weight: 'Optional[Tensor]'=None, k_proj_weight: 'Optional[Tensor]'=None, v_proj_weight: 
'Optional[Tensor]'=None) ->Tuple[ Tensor, Optional[Tensor]]: tgt_len, bsz, embed_dim = query.shape src_len, _, _ = key.shape head_dim = embed_dim // num_heads q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias) if attn_mask is not None: if attn_mask.dtype == torch.uint8: warnings.warn( 'Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.' ) attn_mask = attn_mask else: assert attn_mask.is_floating_point( ) or attn_mask.dtype == torch.bool, f'Only float, byte, and bool types are supported for attn_mask, not {attn_mask.dtype}' if attn_mask.dim() == 2: correct_2d_size = tgt_len, src_len if attn_mask.shape != correct_2d_size: raise RuntimeError( f'The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}.' ) attn_mask = attn_mask.unsqueeze(0) elif attn_mask.dim() == 3: correct_3d_size = bsz * num_heads, tgt_len, src_len if attn_mask.shape != correct_3d_size: raise RuntimeError( f'The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}.' ) else: raise RuntimeError( f"attn_mask's dimension {attn_mask.dim()} is not supported") if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8: warnings.warn( 'Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.' ) key_padding_mask = key_padding_mask q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1) k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) if key_padding_mask is not None: assert key_padding_mask.shape == (bsz, src_len ), f'expecting key_padding_mask shape of {bsz, src_len}, but got {key_padding_mask.shape}' key_padding_mask = key_padding_mask.view(bsz, 1, 1, src_len).expand( -1, num_heads, -1, -1).reshape(bsz * num_heads, 1, src_len) if attn_mask is None: attn_mask = key_padding_mask elif attn_mask.dtype == torch.bool: attn_mask = attn_mask.logical_or(key_padding_mask) else: attn_mask = attn_mask.masked_fill(key_padding_mask, float('-inf')) if attn_mask is not None and attn_mask.dtype == torch.bool: new_attn_mask = torch.zeros_like(attn_mask, dtype=torch.float) new_attn_mask.masked_fill_(attn_mask, float('-inf')) attn_mask = new_attn_mask if not training: dropout_p = 0.0 attn_output, attn_output_weights = scale_dot_attention(q, k, v, dropout_p=dropout_p, attn_mask=attn_mask) attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn_output = nn.functional.linear(attn_output, out_proj_weight, out_proj_bias) if need_weights: attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) return attn_output, attn_output_weights.sum(dim=1) / num_heads else: return attn_output, None class MultiheadAttention(nn.Module): def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True, kdim= None, vdim=None, batch_first=False) ->None: super(MultiheadAttention, self).__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self._qkv_same_embed_dim = (self.kdim == self.embed_dim and self.
vdim == self.embed_dim) self.num_heads = num_heads self.dropout = dropout self.batch_first = batch_first self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads' if self._qkv_same_embed_dim is False: self.q_proj_weight = Parameter(torch.empty((embed_dim, embed_dim))) self.k_proj_weight = Parameter(torch.empty((embed_dim, self.kdim))) self.v_proj_weight = Parameter(torch.empty((embed_dim, self.vdim))) self.register_parameter('in_proj_weight', None) else: self.in_proj_weight = Parameter(torch.empty((3 * embed_dim, embed_dim))) self.register_parameter('q_proj_weight', None) self.register_parameter('k_proj_weight', None) self.register_parameter('v_proj_weight', None) if bias: self.in_proj_bias = Parameter(torch.empty(3 * embed_dim)) else: self.register_parameter('in_proj_bias', None) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self._reset_parameters() def _reset_parameters(self): if self._qkv_same_embed_dim: xavier_uniform_(self.in_proj_weight) else: xavier_uniform_(self.q_proj_weight) xavier_uniform_(self.k_proj_weight) xavier_uniform_(self.v_proj_weight) if self.in_proj_bias is not None: constant_(self.in_proj_bias, 0.0) constant_(self.out_proj.bias, 0.0) def forward(self, query: 'Tensor', key: 'Tensor', value: 'Tensor', key_padding_mask: 'Optional[Tensor]'=None, need_weights: 'bool'= True, attn_mask: 'Optional[Tensor]'=None) ->Tuple[Tensor, Optional[ Tensor]]: if self.batch_first: query, key, value = [x.transpose(1, 0) for x in (query, key, value) ] if not self._qkv_same_embed_dim: attn_output, attn_output_weights = multi_head_attention_forward( query, key, value, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.dropout, self.out_proj.weight, self .out_proj.bias, training=self.training, key_padding_mask= key_padding_mask, need_weights=need_weights, attn_mask= attn_mask, use_separate_proj_weight=True, q_proj_weight= self.q_proj_weight, k_proj_weight=self.k_proj_weight, v_proj_weight=self.v_proj_weight) else: attn_output, attn_output_weights = multi_head_attention_forward( query, key, value, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.dropout, self.out_proj.weight, self .out_proj.bias, training=self.training, key_padding_mask= key_padding_mask, need_weights=need_weights, attn_mask= attn_mask) if self.batch_first: return attn_output.transpose(1, 0), attn_output_weights else: return attn_output, attn_output_weights class TransformerDecoderLayerNew(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation=F.relu, layer_norm_eps=1e-05, batch_first=False) ->None: super(TransformerDecoderLayerNew, self).__init__() self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first) self.multihead_attn = MultiheadAttention(d_model, nhead, dropout= dropout, batch_first=batch_first) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.norm3 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.dropout3 = nn.Dropout(dropout) self.activation = activation def forward(self, input_0, input_1): primals_1 = self.self_attn.in_proj_weight primals_2 = self.self_attn.in_proj_bias primals_3 = self.self_attn.out_proj.weight primals_4 = self.self_attn.out_proj.bias primals_8 = 
self.multihead_attn.in_proj_weight primals_9 = self.multihead_attn.in_proj_bias primals_10 = self.multihead_attn.out_proj.weight primals_11 = self.multihead_attn.out_proj.bias primals_15 = self.linear1.weight primals_16 = self.linear1.bias primals_17 = self.linear2.weight primals_18 = self.linear2.bias primals_6 = self.norm1.weight primals_7 = self.norm1.bias primals_13 = self.norm2.weight primals_14 = self.norm2.bias primals_19 = self.norm3.weight primals_20 = self.norm3.bias primals_5 = input_0 primals_12 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20]) return output[0]
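# Hedged parity sketch (illustrative; the _parity_check name is hypothetical,
# a CUDA device is assumed, and dropout must be inactive, matching the traced
# graph). The compiled wrapper and the eager TransformerDecoderLayer defined
# earlier in this record share parameter names, so a state_dict copy aligns
# their weights before comparing outputs.
def _parity_check():
    torch.manual_seed(0)
    ref = TransformerDecoderLayer(d_model=4, nhead=4).cuda().eval()
    opt = TransformerDecoderLayerNew(d_model=4, nhead=4).cuda().eval()
    opt.load_state_dict(ref.state_dict())
    tgt = torch.rand(4, 4, 4, device='cuda')
    memory = torch.rand(4, 4, 4, device='cuda')
    torch.testing.assert_close(opt(tgt, memory), ref(tgt, memory),
        rtol=1e-4, atol=1e-5)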
Treedy2020/TransNet
TransformerDecoderLayer
false
18053
[ "MIT" ]
4
dd0e43e1931153baea4e5fe8cb31dc5ff0cb7b09
https://github.com/Treedy2020/TransNet/tree/dd0e43e1931153baea4e5fe8cb31dc5ff0cb7b09
distLinear
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/3i/c3iwk6z43bt4ojjll4lqkdq3g2uyv7rkbwsjg5lywqy7pcnhhyhf.py # Topologically Sorted Source Nodes: [add_1, div_1], Original ATen: [aten.add, aten.div] # Source node to ATen node mapping: # add_1 => add_1 # div_1 => div_1 # Graph fragment: # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_1, 1e-05), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %add_1), kwargs = {}) triton_poi_fused_add_div_0 = async_compile.triton('triton_poi_fused_add_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + 
(4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-05 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/fi/cfi4dsjwhxk7pardm6qvsvncmq5fkisqec2ijro4cf4644sn57ho.py # Topologically Sorted Source Nodes: [add, x_normalized], Original ATen: [aten.add, aten.div] # Source node to ATen node mapping: # add => add # x_normalized => div # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, 1e-05), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %add), kwargs = {}) triton_poi_fused_add_div_1 = async_compile.triton('triton_poi_fused_add_div_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-05 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x3), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/n5/cn5km6vfmch3zqaeklytmn4ey4g3cucx757ukkl25zwwwnfushtj.py # 
Topologically Sorted Source Nodes: [scores], Original ATen: [aten.mul] # Source node to ATen node mapping: # scores => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 2), kwargs = {}) triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [add_1, div_1], Original ATen: [aten.add, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_0.run(primals_2, buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, x_normalized], Original ATen: [aten.add, aten.div] triton_poi_fused_add_div_1.run(primals_1, buf1, 256, grid=grid(256), stream=stream0) del primals_1 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [cos_dist], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [scores], Original ATen: [aten.mul] triton_poi_fused_mul_2.run(buf2, buf3, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [], Original ATen: [] buf4 = torch.ops.aten.set_.source_Tensor(primals_2, buf0) assert_size_stride(buf4, (4, 4), (4, 1)) del buf2 del primals_2 return (buf3, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), ) 
def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
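The graph above folds the eager module's in-place weight update into triton_poi_fused_add_div_0 plus the trailing torch.ops.aten.set_.source_Tensor, which writes the normalized rows back into primals_2. A minimal eager-mode sketch of what that kernel computes (the helper name is hypothetical, for illustration only):

import torch

def normalize_rows_eager(w: torch.Tensor, eps: float = 1e-05) -> torch.Tensor:
    # Reference for triton_poi_fused_add_div_0: per-row sum of squares
    # (tmp2..tmp11), sqrt (tmp12), + eps (tmp14), then divide (tmp15).
    row_norm = torch.sqrt((w * w).sum(dim=1, keepdim=True))
    return w / (row_norm + eps)

w = torch.randn(4, 4)
assert torch.allclose(normalize_rows_eager(w),
                      w / (torch.norm(w, p=2, dim=1, keepdim=True) + 1e-05))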
import torch import torch.nn as nn from torch.nn.utils.weight_norm import WeightNorm import torch.utils.data class distLinear(nn.Module): def __init__(self, indim, outdim): super(distLinear, self).__init__() self.L = nn.Linear(indim, outdim, bias=False) self.class_wise_learnable_norm = False if self.class_wise_learnable_norm: WeightNorm.apply(self.L, 'weight', dim=0) if outdim <= 200: self.scale_factor = 2 else: self.scale_factor = 10 def forward(self, x): x_norm = torch.norm(x, p=2, dim=1).unsqueeze(1).expand_as(x) x_normalized = x.div(x_norm + 1e-05) if not self.class_wise_learnable_norm: L_norm = torch.norm(self.L.weight.data, p=2, dim=1).unsqueeze(1 ).expand_as(self.L.weight.data) self.L.weight.data = self.L.weight.data.div(L_norm + 1e-05) cos_dist = self.L(x_normalized) scores = self.scale_factor * cos_dist return scores def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'indim': 4, 'outdim': 4}]
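distLinear is a cosine-similarity classifier: both the input rows and the class-weight rows are L2-normalized (with a 1e-05 stabilizer added to each norm) before the linear layer, and the result is multiplied by scale_factor. A quick hand check with a 2-D input, where dim=1 is unambiguously the feature dimension; this sketch is not part of the original test harness:

import torch
import torch.nn.functional as F

torch.manual_seed(0)
m = distLinear(indim=4, outdim=4)
x = torch.rand(8, 4)

scores = m(x)  # forward() also row-normalizes m.L.weight in place

# Up to the 1e-05 offsets, this is scale_factor * cosine similarity.
expected = 2 * F.normalize(x, dim=1) @ F.normalize(m.L.weight, dim=1).t()
print(torch.allclose(scores, expected, atol=1e-4))  # expected: True

Note that for the 4-D tensors produced by get_inputs(), the dim=1 normalization runs over the channel dimension while the linear acts on the last dimension, so the cosine interpretation is exact only for 2-D inputs.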
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn.utils.weight_norm import WeightNorm import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-05 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_add_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-05 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) @triton.jit def triton_poi_fused_mul_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_0[grid(16)](primals_2, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_1[grid(256)](primals_1, buf1, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_1 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_2[grid(256)](buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) buf4 = 
torch.ops.aten.set_.source_Tensor(primals_2, buf0) assert_size_stride(buf4, (4, 4), (4, 1)) del buf2 del primals_2 return buf3, reinterpret_tensor(buf1, (64, 4), (4, 1), 0) class distLinearNew(nn.Module): def __init__(self, indim, outdim): super(distLinearNew, self).__init__() self.L = nn.Linear(indim, outdim, bias=False) self.class_wise_learnable_norm = False if self.class_wise_learnable_norm: WeightNorm.apply(self.L, 'weight', dim=0) if outdim <= 200: self.scale_factor = 2 else: self.scale_factor = 10 def forward(self, input_0): primals_2 = self.L.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
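A hedged smoke test (assuming a CUDA device and that both class definitions above are in scope) that the compiled path in distLinearNew reproduces the eager module:

import torch

if torch.cuda.is_available():
    eager = distLinear(indim=4, outdim=4).cuda()
    fused = distLinearNew(indim=4, outdim=4).cuda()
    fused.L.weight.data.copy_(eager.L.weight.data)

    x = torch.rand(4, 4, 4, 4, device='cuda')
    # no_grad: call() writes the normalized rows back into the weight
    # with aten.set_, an in-place update on a leaf parameter.
    with torch.no_grad():
        out_eager = eager(x)
        out_fused = fused(x)
    # Both paths normalize the weight rows before the matmul and scale
    # by 2.0, so the results should agree to float32 tolerance.
    print(torch.allclose(out_eager, out_fused, atol=1e-5))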
VisionLearningGroup/CDS
distLinear
false
18054
[ "MIT" ]
7
5b3644c286f19f76acdc03c6f6021a6f6e4ec4fc
https://github.com/VisionLearningGroup/CDS/tree/5b3644c286f19f76acdc03c6f6021a6f6e4ec4fc
BertAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/lh/clh4qkas6kymmge5sjkrotgaojywmpy5pjnwzrqo6ugbfmgmcf4n.py # Topologically Sorted Source Nodes: [attention_scores], Original ATen: [aten.clone] # Source node to ATen node mapping: # attention_scores => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, 
None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ek/cektoisdyqixwej57l3zqwuqwscvurzi5265oi3nvdhldh7dyed5.py # Topologically Sorted Source Nodes: [attention_scores_1, attention_scores_2, attention_probs], Original ATen: [aten.div, aten.add, aten._softmax] # Source node to ATen node mapping: # attention_probs => amax, exp, sub, sum_1 # attention_scores_1 => div # attention_scores_2 => add # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_8), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) triton_poi_fused__softmax_add_div_1 = async_compile.triton('triton_poi_fused__softmax_add_div_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_div_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, 
eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp5 * tmp1 tmp8 = tmp6 + tmp7 tmp9 = triton_helpers.maximum(tmp4, tmp8) tmp11 = tmp10 * tmp1 tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp9, tmp13) tmp16 = tmp15 * tmp1 tmp18 = tmp16 + tmp17 tmp19 = triton_helpers.maximum(tmp14, tmp18) tmp20 = tmp4 - tmp19 tmp21 = tl_math.exp(tmp20) tmp22 = tmp8 - tmp19 tmp23 = tl_math.exp(tmp22) tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp19 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp28 = tmp18 - tmp19 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tl.store(out_ptr0 + (x2), tmp19, xmask) tl.store(out_ptr1 + (x2), tmp30, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ky/ckyvrwwqpclsov3axt2mifozl77oy7iqgbp4auvxjioltyp4dhci.py # Topologically Sorted Source Nodes: [attention_scores_1, attention_scores_2, attention_probs], Original ATen: [aten.div, aten.add, aten._softmax] # Source node to ATen node mapping: # attention_probs => amax, div_1, exp, sub # attention_scores_1 => div # attention_scores_2 => add # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_8), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_add_div_2 = async_compile.triton('triton_poi_fused__softmax_add_div_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_div_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused__softmax_add_div_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x5 = (xindex // 4) tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp3 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tl.store(in_out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/jw/cjwmckzja5e76tfn5paphr42zj35aqac3ck5tx4dkt7dnf6o7t5c.py # Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # context_layer_1 => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/d5/cd5u4am64eikt5jmvbwbo7gzvrfl4ij2uzo4wj5fjknqhdtcvnwg.py # Topologically Sorted Source Nodes: [add_1, u, sub, pow_1, s], Original ATen: [aten.add, aten.mean, aten.sub, aten.pow] # Source node to ATen node mapping: # add_1 => add_1 # pow_1 => pow_1 # s => mean_1 # sub => sub_1 # u => mean 
# Graph fragment: # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_3), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_1, [-1], True), kwargs = {}) # %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %mean), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {}) triton_poi_fused_add_mean_pow_sub_4 = async_compile.triton('triton_poi_fused_add_mean_pow_sub_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_pow_sub_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mean_pow_sub_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + (x0), tmp16, xmask) tl.store(out_ptr1 + (x0), tmp28, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_2/inductor_cache/w5/cw53gaxgrpz33u4fzfpebjxuz2hfuk7e2jw57urbv3fw36h7mgtj.py # Topologically Sorted Source Nodes: [add_1, u, sub, add_2, sqrt, x_3, mul, hidden_states_2], Original ATen: [aten.add, aten.mean, aten.sub, aten.sqrt, aten.div, aten.mul] # Source node to ATen node mapping: # add_1 => add_1 # add_2 => add_2 # hidden_states_2 => add_3 # mul => mul # sqrt => sqrt # sub => sub_1 # u => mean # x_3 => div_2 # Graph fragment: # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_17, %primals_3), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_1, [-1], True), kwargs = {}) # %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_1, %mean), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-12), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_2,), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %sqrt), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_11, %div_2), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_12), kwargs = {}) triton_poi_fused_add_div_mean_mul_sqrt_sub_5 = async_compile.triton('triton_poi_fused_add_div_mean_mul_sqrt_sub_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_sqrt_sub_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_mul_sqrt_sub_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tl.load(in_ptr2 + (x2), xmask) tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp12 = 
tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp7 = 1e-12 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tmp10 = tmp5 / tmp9 tmp11 = tmp0 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, ), (1, )) assert_size_stride(primals_12, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_scores], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf0, primals_2, buf3, 16, 4, grid=grid(16, 4), stream=stream0) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [attention_scores], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_scores], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [attention_scores_1, attention_scores_2, attention_probs], Original ATen: [aten.div, aten.add, aten._softmax] triton_poi_fused__softmax_add_div_1.run(buf5, primals_8, buf6, buf7, 64, grid=grid(64), stream=stream0) buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [attention_scores_1, attention_scores_2, attention_probs], Original ATen: [aten.div, aten.add, aten._softmax] triton_poi_fused__softmax_add_div_2.run(buf8, primals_8, buf6, buf7, 256, grid=grid(256), stream=stream0) del primals_8 buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 
1, 1), 0); del buf7 # reuse # Topologically Sorted Source Nodes: [context_layer], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf2, primals_7, buf9, 16, 4, grid=grid(16, 4), stream=stream0) del primals_7 buf10 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [context_layer], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf6 # reuse # Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf10, buf11, 16, 4, grid=grid(16, 4), stream=stream0) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0); del buf10 # reuse # Topologically Sorted Source Nodes: [hidden_states], Original ATen: [aten.addmm] extern_kernels.addmm(primals_10, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12) del primals_10 buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [add_1, u, sub, pow_1, s], Original ATen: [aten.add, aten.mean, aten.sub, aten.pow] triton_poi_fused_add_mean_pow_sub_4.run(buf12, primals_3, buf13, buf14, 16, grid=grid(16), stream=stream0) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add_1, u, sub, add_2, sqrt, x_3, mul, hidden_states_2], Original ATen: [aten.add, aten.mean, aten.sub, aten.sqrt, aten.div, aten.mul] triton_poi_fused_add_div_mean_mul_sqrt_sub_5.run(primals_11, buf12, primals_3, buf13, buf14, primals_12, buf15, 64, grid=grid(64), stream=stream0) del buf13 del buf14 del primals_12 return (buf15, buf8, primals_3, primals_11, buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf12, primals_9, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main 
compiled_module_main('None', benchmark_compiled_module)
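The softmax over attention scores is split across two kernels in the classic numerically stable pattern: triton_poi_fused__softmax_add_div_1 produces the per-row maximum and the sum of shifted exponentials, and triton_poi_fused__softmax_add_div_2 reuses them to normalize buf8 in place. The tmp1 = 1.0 factor is 1/sqrt(attention_head_size), with head_size = 4/4 = 1 in this trace. An eager-mode sketch of the same two-phase computation (the helper name is hypothetical):

import torch

def two_pass_softmax(scores: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    # Pass 1 (kernel 1): masked scores, row max, sum of shifted exps.
    s = scores / 1.0 + mask                                  # head_size = 1, so sqrt(d) = 1
    row_max = s.amax(dim=-1, keepdim=True)                   # out_ptr0
    row_sum = (s - row_max).exp().sum(dim=-1, keepdim=True)  # out_ptr1
    # Pass 2 (kernel 2): normalize in place on the score buffer.
    return (s - row_max).exp() / row_sum

p = two_pass_softmax(torch.rand(4, 4, 4, 4), torch.zeros(4, 4, 4))
print(torch.allclose(p.sum(-1), torch.ones(4, 4, 4)))  # rows sum to 1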
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer, attention_probs class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): def __init__(self, config): super(BertAttention, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, input_tensor, attention_mask): self_output, attn = self.self(input_tensor, attention_mask) attention_output = self.output(self_output, input_tensor) return attention_output, attn def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, num_attention_heads= 4, attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5)}]
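A shape walkthrough of the eager module under the same _mock_config the harness uses. Both dropouts are active at p=0.5 in train mode, so .eval() is needed for deterministic output; this is an illustrative sketch assuming _paritybench_helpers is importable:

import torch
from _paritybench_helpers import _mock_config

cfg = _mock_config(hidden_size=4, num_attention_heads=4,
                   attention_probs_dropout_prob=0.5, hidden_dropout_prob=0.5)
attn = BertAttention(cfg).eval()   # .eval() turns both dropouts off

hidden = torch.rand(4, 4, 4)       # (batch, seq, hidden)
mask = torch.zeros(4, 4, 4)        # additive mask, broadcast over heads
out, probs = attn(hidden, mask)

print(out.shape)                   # torch.Size([4, 4, 4])
print(probs.shape)                 # torch.Size([4, 4, 4, 4]): (batch, head, q, k)
print(torch.allclose(probs.sum(-1), torch.ones(4, 4, 4)))  # softmax rows sum to 1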
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_add_div_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp5 * tmp1 tmp8 = tmp6 + tmp7 tmp9 = triton_helpers.maximum(tmp4, tmp8) tmp11 = tmp10 * tmp1 tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp9, tmp13) tmp16 = tmp15 * tmp1 tmp18 = tmp16 + tmp17 tmp19 = triton_helpers.maximum(tmp14, tmp18) tmp20 = tmp4 - tmp19 tmp21 = tl_math.exp(tmp20) tmp22 = tmp8 - tmp19 tmp23 = tl_math.exp(tmp22) tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp19 tmp26 = tl_math.exp(tmp25) tmp27 = tmp24 + tmp26 tmp28 = tmp18 - tmp19 tmp29 = tl_math.exp(tmp28) tmp30 = tmp27 + tmp29 tl.store(out_ptr0 + x2, tmp19, xmask) tl.store(out_ptr1 + x2, tmp30, xmask) @triton.jit def triton_poi_fused__softmax_add_div_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x5 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp3 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 - tmp5 tmp7 = tl_math.exp(tmp6) tmp9 = tmp7 / tmp8 tl.store(in_out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_mean_pow_sub_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_div_mean_mul_sqrt_sub_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr2 + x2, xmask) tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp7 = 1e-12 tmp8 = tmp6 + tmp7 tmp9 = libdevice.sqrt(tmp8) tmp10 = tmp5 / tmp9 tmp11 = tmp0 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) 
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf1 buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_add_div_1[grid(64)](buf5, primals_8, buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_add_div_2[grid(256)](buf8, primals_8, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_8 buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_7, buf9, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_7 buf10 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf6 triton_poi_fused_clone_3[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0) del buf10 extern_kernels.addmm(primals_10, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12) del primals_10 buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_mean_pow_sub_4[grid(16)](buf12, primals_3, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1) buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_mean_mul_sqrt_sub_5[grid(64)](primals_11, buf12, primals_3, buf13, buf14, primals_12, buf15, 64, XBLOCK= 64, num_warps=1, num_stages=1) del buf13 del buf14 del primals_12 return buf15, buf8, primals_3, primals_11, buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf12, primals_9, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0) class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - 
u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer, attention_probs class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttentionNew(nn.Module): def __init__(self, config): super(BertAttentionNew, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, input_0, input_1): primals_1 = self.self.query.weight primals_2 = self.self.query.bias primals_4 = self.self.key.weight primals_5 = self.self.key.bias primals_6 = self.self.value.weight primals_7 = self.self.value.bias primals_9 = self.output.dense.weight primals_10 = self.output.dense.bias primals_11 = self.output.LayerNorm.weight primals_12 = self.output.LayerNorm.bias primals_3 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0], output[1]
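As with the first record, a hedged equivalence check between the eager BertAttention (from the preceding listing) and BertAttentionNew; the compiled graph contains no dropout kernels, so the eager side must run in .eval() mode for the comparison to be meaningful. Assumes a CUDA device and that _paritybench_helpers and both class definitions are importable side by side:

import torch
from _paritybench_helpers import _mock_config

if torch.cuda.is_available():
    cfg = _mock_config(hidden_size=4, num_attention_heads=4,
                       attention_probs_dropout_prob=0.5,
                       hidden_dropout_prob=0.5)
    eager = BertAttention(cfg).cuda().eval()
    fused = BertAttentionNew(cfg).cuda()
    fused.load_state_dict(eager.state_dict())  # parameter names line up

    h = torch.rand(4, 4, 4, device='cuda')
    m = torch.zeros(4, 4, 4, device='cuda')
    out_e, probs_e = eager(h, m)
    out_f, probs_f = fused(h, m)
    print(torch.allclose(out_e, out_f, atol=1e-5),
          torch.allclose(probs_e, probs_f, atol=1e-5))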
Vitvicky/mrc-for-flat-nested-ner
BertAttention
false
18,055
[ "Apache-2.0" ]
9
37099625e3002c334884fe982a6476e2c783da63
https://github.com/Vitvicky/mrc-for-flat-nested-ner/tree/37099625e3002c334884fe982a6476e2c783da63
ContrastiveLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/yk/cykngrj5r4zatonfn3o4zfsqmzd3zo4t6xwqgecrdlgp5ydjvh32.py # Topologically Sorted Source Nodes: [euclidean_distance], Original ATen: [aten.sub, aten.add, aten.norm] # Source node to ATen node mapping: # euclidean_distance => add, pow_1, pow_2, sub, sum_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Scalar](args = (%sub, 1e-06), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2.0), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [3]), kwargs = {}) # %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) triton_poi_fused_add_norm_sub_0 = async_compile.triton('triton_poi_fused_add_norm_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_norm_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': 
False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = 1e-06 tmp4 = tmp2 + tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 + tmp3 tmp10 = tmp9 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp12 - tmp13 tmp15 = tmp14 + tmp3 tmp16 = tmp15 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp20 + tmp3 tmp22 = tmp21 * tmp21 tmp23 = tmp17 + tmp22 tmp24 = libdevice.sqrt(tmp23) tl.store(out_ptr0 + (x0), tmp24, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/7t/c7ttbti65w3id2ui72ti6ccnfcpacmpvxwjo35tzqa7ggiopqvmy.py # Topologically Sorted Source Nodes: [sub, pow_1, mul, sub_1, clamp, pow_2, mul_1, add, loss_contrastive], Original ATen: [aten.rsub, aten.pow, aten.mul, aten.clamp, aten.add, aten.mean] # Source node to ATen node mapping: # add => add_1 # clamp => clamp_min # loss_contrastive => mean # mul => mul # mul_1 => mul_1 # pow_1 => pow_3 # pow_2 => pow_4 # sub => sub_1 # sub_1 => sub_2 # Graph fragment: # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg2_1), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_2, 2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %pow_3), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (2.0, %pow_2), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {}) # %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%clamp_min, 2), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, %pow_4), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add_1,), kwargs = {}) triton_per_fused_add_clamp_mean_mul_pow_rsub_1 = async_compile.triton('triton_per_fused_add_clamp_mean_mul_pow_rsub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 
'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_mean_mul_pow_rsub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_clamp_mean_mul_pow_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r2 = rindex r0 = rindex % 64 tmp0 = tl.load(in_ptr0 + (r2), None) tmp3 = tl.load(in_ptr1 + (r0), None, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp3 * tmp3 tmp5 = tmp2 * tmp4 tmp6 = 2.0 tmp7 = tmp6 - tmp3 tmp8 = 0.0 tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp10 = tmp9 * tmp9 tmp11 = tmp0 * tmp10 tmp12 = tmp5 + tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp17, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [euclidean_distance], Original ATen: [aten.sub, aten.add, aten.norm] stream0 = get_raw_stream(0) triton_poi_fused_add_norm_sub_0.run(arg1_1, arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [sub, pow_1, mul, sub_1, clamp, pow_2, mul_1, add, loss_contrastive], Original ATen: [aten.rsub, aten.pow, aten.mul, aten.clamp, aten.add, aten.mean] triton_per_fused_add_clamp_mean_mul_pow_rsub_1.run(buf2, arg2_1, buf0, 1, 256, grid=grid(1), stream=stream0) del arg2_1 del buf0 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark 
import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F class ContrastiveLoss(torch.nn.Module): def __init__(self, margin=2.0): super(ContrastiveLoss, self).__init__() self.margin = margin def forward(self, output1, output2, label): euclidean_distance = F.pairwise_distance(output1, output2) loss_contrastive = torch.mean((1 - label) * torch.pow( euclidean_distance, 2) + label * torch.pow(torch.clamp(self. margin - euclidean_distance, min=0.0), 2)) return loss_contrastive def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
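[Editor's usage sketch, not from the source repo] Pairs with label 0 are pulled together and pairs with label 1 are pushed at least `margin` apart. With 2-D inputs F.pairwise_distance reduces over the embedding dimension; the bundled get_inputs() feeds 4-D tensors, where it reduces over the last dimension only.

import torch
loss_fn = ContrastiveLoss(margin=2.0)
a = torch.rand(8, 128, requires_grad=True)  # first embedding of each pair
b = torch.rand(8, 128)                      # second embedding of each pair
label = torch.randint(0, 2, (8,)).float()   # 0 = similar, 1 = dissimilar
loss = loss_fn(a, b, label)
loss.backward()                             # gradients flow into a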
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_norm_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 - tmp1 tmp3 = 1e-06 tmp4 = tmp2 + tmp3 tmp5 = tmp4 * tmp4 tmp8 = tmp6 - tmp7 tmp9 = tmp8 + tmp3 tmp10 = tmp9 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp12 - tmp13 tmp15 = tmp14 + tmp3 tmp16 = tmp15 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp18 - tmp19 tmp21 = tmp20 + tmp3 tmp22 = tmp21 * tmp21 tmp23 = tmp17 + tmp22 tmp24 = libdevice.sqrt(tmp23) tl.store(out_ptr0 + x0, tmp24, xmask) @triton.jit def triton_per_fused_add_clamp_mean_mul_pow_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r2 = rindex r0 = rindex % 64 tmp0 = tl.load(in_ptr0 + r2, None) tmp3 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp3 * tmp3 tmp5 = tmp2 * tmp4 tmp6 = 2.0 tmp7 = tmp6 - tmp3 tmp8 = 0.0 tmp9 = triton_helpers.maximum(tmp7, tmp8) tmp10 = tmp9 * tmp9 tmp11 = tmp0 * tmp10 tmp12 = tmp5 + tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_norm_sub_0[grid(64)](arg1_1, arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused_add_clamp_mean_mul_pow_rsub_1[grid(1)](buf2, arg2_1, buf0, 1, 256, num_warps=2, num_stages=1) del arg2_1 del buf0 return buf2, class ContrastiveLossNew(torch.nn.Module): def __init__(self, margin=2.0): super(ContrastiveLossNew, self).__init__() self.margin = margin def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
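[Editor's note and sketch] The fused path above matches the eager module only for the default margin: the reduction kernel hardwires tmp6 = 2.0, so the margin stored on ContrastiveLossNew never reaches the GPU code. A minimal equivalence check under that assumption (a CUDA device is required, since call() pins its buffers to cuda:0):

import torch
a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
label = torch.randint(0, 2, (4, 4, 4, 4), device='cuda').float()
ref = ContrastiveLoss(margin=2.0)(a, b, label)
fused = ContrastiveLossNew(margin=2.0)(a, b, label)
torch.testing.assert_close(fused, ref, rtol=1e-5, atol=1e-6)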
WLYLab/PepFormer
ContrastiveLoss
false
18,056
[ "MIT" ]
6
9bac4544dc88bcd66e975a6714a264dcc9c55304
https://github.com/WLYLab/PepFormer/tree/9bac4544dc88bcd66e975a6714a264dcc9c55304
WeightNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/pi/cpiv64zdylpogr2luvosojuac2zkif63bvqb7wnplkzxjmyhn2fq.py # Topologically Sorted Source Nodes: [x, sigmoid, x_3], Original ATen: [aten.convolution, aten.sigmoid, aten.mul] # Source node to ATen node mapping: # sigmoid => sigmoid # x => convolution # x_3 => mul # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1], [1], [1], False, [0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%permute,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, 2), kwargs = {}) triton_poi_fused_convolution_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_convolution_mul_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit 
def triton_poi_fused_convolution_mul_sigmoid_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tmp5 = 2.0 tmp6 = tmp4 * tmp5 tl.store(in_out_ptr0 + (x0), tmp3, xmask) tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1, 4, 3), (12, 3, 1)) assert_size_stride(primals_3, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4), (4, 4, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [x, sigmoid, x_3], Original ATen: [aten.convolution, aten.sigmoid, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_convolution_mul_sigmoid_0.run(buf1, primals_3, buf2, 16, grid=grid(16), stream=stream0) del primals_3 return (buf2, primals_1, primals_2, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4, 3), (12, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class WeightNet(nn.Module): """WeightNet in Temporal interlace module. The WeightNet consists of two parts: one convolution layer and a sigmoid function. Following the convolution layer, the sigmoid function and rescale module can scale our output to the range (0, 2). Here we set the initial bias of the convolution layer to 0, and the final initial output will be 1.0. Args: in_channels (int): Channel num of input features. groups (int): Number of groups for fc layer outputs. """ def __init__(self, in_channels, groups): super().__init__() self.sigmoid = nn.Sigmoid() self.groups = groups self.conv = nn.Conv1d(in_channels, groups, 3, padding=1) self.init_weights() def init_weights(self): """Initiate the parameters either from existing checkpoint or from scratch.""" self.conv.bias.data[...] = 0 def forward(self, x): """Defines the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor: The output of the module. """ n, _, t = x.shape x = self.conv(x) x = x.view(n, self.groups, t) x = x.permute(0, 2, 1) x = 2 * self.sigmoid(x) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'groups': 1}]
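[Editor's usage sketch, not from the source repo] The module maps (N, C, T) features to per-frame weights in (0, 2); with the bias initialised to zero and small random conv weights, the initial outputs sit near 1.0, as the docstring describes.

import torch
net = WeightNet(in_channels=4, groups=1)
x = torch.rand(2, 4, 8)                  # (N, C, T)
w = net(x)                               # (N, T, groups)
assert w.shape == (2, 8, 1)
assert (w > 0).all() and (w < 2).all()   # 2 * sigmoid(...) stays in (0, 2)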
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_mul_sigmoid_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tmp5 = 2.0 tmp6 = tmp4 * tmp5 tl.store(in_out_ptr0 + x0, tmp3, xmask) tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1, 4, 3), (12, 3, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 4), (4, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_mul_sigmoid_0[grid(16)](buf1, primals_3, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 return buf2, primals_1, primals_2, buf1 class WeightNetNew(nn.Module): """WeightNet in Temporal interlace module. The WeightNet consists of two parts: one convolution layer and a sigmoid function. Following the convolution layer, the sigmoid function and rescale module can scale our output to the range (0, 2). Here we set the initial bias of the convolution layer to 0, and the final initial output will be 1.0. Args: in_channels (int): Channel num of input features. groups (int): Number of groups for fc layer outputs. """ def __init__(self, in_channels, groups): super().__init__() self.sigmoid = nn.Sigmoid() self.groups = groups self.conv = nn.Conv1d(in_channels, groups, 3, padding=1) self.init_weights() def init_weights(self): """Initiate the parameters either from existing checkpoint or from scratch.""" self.conv.bias.data[...] = 0 def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Viditagarwal7479/Video-Swin-Transformer
WeightNet
false
18,057
[ "Apache-2.0" ]
9
37910ef3141c7b2eef76544f9ec8bdf26ec94c7d
https://github.com/Viditagarwal7479/Video-Swin-Transformer/tree/37910ef3141c7b2eef76544f9ec8bdf26ec94c7d
PatchMerging
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/6i/c6iom7krjnqumkwuxdudnlx6zgqnm3a735cxbgjbakyzn4njxnt5.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.cat, aten.native_layer_norm] # Source node to ATen node mapping: # x => cat # x_1 => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_4, %slice_9, %slice_14, %slice_19], -1), kwargs = {}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%cat, [4]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%cat, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {}) triton_per_fused_cat_native_layer_norm_0 = async_compile.triton('triton_per_fused_cat_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cat_native_layer_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 4, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_cat_native_layer_norm_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 2 x1 = (xindex // 2) x3 = xindex tmp46 = tl.load(in_ptr1 + (r2), None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr2 + (r2), None, eviction_policy='evict_last') tmp0 = r2 tmp1 = tl.full([1, 1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((8*x0) + (32*x1) + r2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1, 1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (16 + (8*x0) + (32*x1) + ((-4) + r2)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1, 1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (4 + (8*x0) + (32*x1) + ((-8) + r2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1, 1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tl.load(in_ptr0 + (20 + (8*x0) + (32*x1) + ((-12) + r2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tmp25 = tl.where(xmask, tmp23, 0) tmp26 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp28 = tl.where(xmask, tmp26, 0) tmp29 = tl.sum(tmp28, 1)[:, None] tmp30 = tl.full([XBLOCK, 1], 16, tl.int32) tmp31 = tmp30.to(tl.float32) tmp32 = tmp29 / tmp31 tmp33 = tmp23 - tmp32 tmp34 = tmp33 * tmp33 tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK]) tmp37 = tl.where(xmask, tmp35, 0) tmp38 = tl.sum(tmp37, 1)[:, None] tmp39 = 16.0 tmp40 = tmp38 / tmp39 tmp41 = 1e-05 tmp42 = tmp40 + tmp41 tmp43 = libdevice.rsqrt(tmp42) tmp44 = tmp22 - tmp32 tmp45 = tmp44 * tmp43 tmp47 = tmp45 * tmp46 tmp49 = tmp47 + tmp48 tl.store(out_ptr0 + (r2 + (16*x3)), tmp22, xmask) tl.debug_barrier() tl.store(in_out_ptr0 + (x3), tmp43, xmask) tl.store(out_ptr2 + (r2 + (16*x3)), tmp49, xmask) tl.store(out_ptr1 + (x3), tmp32, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (16, ), (1, )) assert_size_stride(primals_4, (8, 16), (16, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2, 16), (256, 64, 
32, 16, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 2, 2, 1), (16, 4, 2, 1, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 2, 2, 1), (16, 4, 2, 1, 64), torch.float32) buf4 = reinterpret_tensor(buf2, (4, 4, 2, 2, 1), (16, 4, 2, 1, 1), 0); del buf2 # reuse buf5 = empty_strided_cuda((4, 4, 2, 2, 16), (256, 64, 32, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.cat, aten.native_layer_norm] stream0 = get_raw_stream(0) triton_per_fused_cat_native_layer_norm_0.run(buf4, primals_1, primals_2, primals_3, buf0, buf1, buf5, 64, 16, grid=grid(64), stream=stream0) del primals_1 del primals_2 del primals_3 buf6 = empty_strided_cuda((64, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf5, (64, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 8), (1, 16), 0), out=buf6) return (reinterpret_tensor(buf6, (4, 4, 2, 2, 8), (128, 32, 16, 8, 1), 0), buf0, buf1, buf4, reinterpret_tensor(buf5, (64, 16), (16, 1), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((8, 16), (16, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn class PatchMerging(nn.Module): """ Patch Merging Layer Args: dim (int): Number of input channels. norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm """ def __init__(self, dim, norm_layer=nn.LayerNorm): super().__init__() self.dim = dim self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) self.norm = norm_layer(4 * dim) def forward(self, x): """ Forward function. Args: x: Input feature, tensor size (B, D, H, W, C). """ _B, _D, H, W, _C = x.shape pad_input = H % 2 == 1 or W % 2 == 1 if pad_input: x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2)) x0 = x[:, :, 0::2, 0::2, :] x1 = x[:, :, 1::2, 0::2, :] x2 = x[:, :, 0::2, 1::2, :] x3 = x[:, :, 1::2, 1::2, :] x = torch.cat([x0, x1, x2, x3], -1) x = self.norm(x) x = self.reduction(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
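[Editor's usage sketch, not from the source repo] Each merge step concatenates the four spatial neighbours into 4C channels and projects them down to 2C, halving H and W; odd H or W is zero-padded first.

import torch
merge = PatchMerging(dim=4)
x = torch.rand(2, 3, 8, 8, 4)        # (B, D, H, W, C)
y = merge(x)
assert y.shape == (2, 3, 4, 4, 8)    # H, W halved; C doubled via Linear(4C -> 2C)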
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_cat_native_layer_norm_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr ): xnumel = 64 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 2 x1 = xindex // 2 x3 = xindex tmp46 = tl.load(in_ptr1 + r2, None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr2 + r2, None, eviction_policy='evict_last') tmp0 = r2 tl.full([1, 1], 0, tl.int64) tmp3 = tl.full([1, 1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (8 * x0 + 32 * x1 + r2), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1, 1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (16 + 8 * x0 + 32 * x1 + (-4 + r2)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1, 1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (4 + 8 * x0 + 32 * x1 + (-8 + r2)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1, 1], 16, tl.int64) tmp19 = tl.load(in_ptr0 + (20 + 8 * x0 + 32 * x1 + (-12 + r2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp14, tmp15, tmp19) tmp21 = tl.where(tmp9, tmp10, tmp20) tmp22 = tl.where(tmp4, tmp5, tmp21) tmp23 = tl.broadcast_to(tmp22, [XBLOCK, RBLOCK]) tl.where(xmask, tmp23, 0) tmp26 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp28 = tl.where(xmask, tmp26, 0) tmp29 = tl.sum(tmp28, 1)[:, None] tmp30 = tl.full([XBLOCK, 1], 16, tl.int32) tmp31 = tmp30.to(tl.float32) tmp32 = tmp29 / tmp31 tmp33 = tmp23 - tmp32 tmp34 = tmp33 * tmp33 tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK]) tmp37 = tl.where(xmask, tmp35, 0) tmp38 = tl.sum(tmp37, 1)[:, None] tmp39 = 16.0 tmp40 = tmp38 / tmp39 tmp41 = 1e-05 tmp42 = tmp40 + tmp41 tmp43 = libdevice.rsqrt(tmp42) tmp44 = tmp22 - tmp32 tmp45 = tmp44 * tmp43 tmp47 = tmp45 * tmp46 tmp49 = tmp47 + tmp48 tl.store(out_ptr0 + (r2 + 16 * x3), tmp22, xmask) tl.debug_barrier() tl.store(in_out_ptr0 + x3, tmp43, xmask) tl.store(out_ptr2 + (r2 + 16 * x3), tmp49, xmask) tl.store(out_ptr1 + x3, tmp32, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (16,), (1,)) assert_size_stride(primals_4, (8, 16), (16, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 2, 16), (256, 64, 32, 16, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 2, 2, 1), (16, 4, 2, 1, 1), torch. 
float32) buf2 = empty_strided_cuda((4, 4, 2, 2, 1), (16, 4, 2, 1, 64), torch .float32) buf4 = reinterpret_tensor(buf2, (4, 4, 2, 2, 1), (16, 4, 2, 1, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 2, 2, 16), (256, 64, 32, 16, 1), torch.float32) get_raw_stream(0) triton_per_fused_cat_native_layer_norm_0[grid(64)](buf4, primals_1, primals_2, primals_3, buf0, buf1, buf5, 64, 16, XBLOCK=8, num_warps=2, num_stages=1) del primals_1 del primals_2 del primals_3 buf6 = empty_strided_cuda((64, 8), (8, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (64, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 8), (1, 16), 0), out=buf6) return reinterpret_tensor(buf6, (4, 4, 2, 2, 8), (128, 32, 16, 8, 1), 0 ), buf0, buf1, buf4, reinterpret_tensor(buf5, (64, 16), (16, 1), 0 ), primals_4 class PatchMergingNew(nn.Module): """ Patch Merging Layer Args: dim (int): Number of input channels. norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm """ def __init__(self, dim, norm_layer=nn.LayerNorm): super().__init__() self.dim = dim self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) self.norm = norm_layer(4 * dim) def forward(self, input_0): primals_4 = self.reduction.weight primals_2 = self.norm.weight primals_3 = self.norm.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
Viditagarwal7479/Video-Swin-Transformer
PatchMerging
false
18,058
[ "Apache-2.0" ]
9
37910ef3141c7b2eef76544f9ec8bdf26ec94c7d
https://github.com/Viditagarwal7479/Video-Swin-Transformer/tree/37910ef3141c7b2eef76544f9ec8bdf26ec94c7d
TripletLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/kk/ckkwnvtsfkch4ren2jp3q2jv7b5ehrnnoxjeo4ddw6kalh5nzsmw.py # Topologically Sorted Source Nodes: [dist, clamp, dist_1, eq, mat_sim, sub, mul, add_1, sort, mul_1, add_2, sort_1], Original ATen: [aten.add, aten.clamp, aten.sqrt, aten.eq, aten._to_copy, aten.rsub, aten.mul, aten.sort] # Source node to ATen node mapping: # add_1 => add_1 # add_2 => add_2 # clamp => clamp_min # dist => add # dist_1 => sqrt # eq => eq # mat_sim => convert_element_type # mul => mul # mul_1 => mul_1 # sort => sort # sort_1 => sort_1 # sub => sub # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, %permute), kwargs = {}) # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %add), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_tensor, 1e-12), kwargs = {}) # %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%clamp_min,), kwargs = {}) # %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%expand_2, %permute_2), kwargs = {}) # %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq, torch.float32), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %convert_element_type), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, -9999999.0), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, %mul), kwargs = {}) # %sort : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%add_1, 1, True), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 9999999.0), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, %mul_1), kwargs = {}) # %sort_1 : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%add_2, 1), kwargs = {}) triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_0 = 
async_compile.triton('triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 4], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_out_ptr0 + (r1 + (4*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (4*r1), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (1 + (4*r1)), None, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (2 + (4*r1)), None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (3 + (4*r1)), None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (r1 + (4*x0)), xmask, other=0.0) tmp29 = tl.load(in_ptr1 + (x0 + (4*r1)), xmask, other=0.0) tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp13 = tmp12 * tmp12 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp23 = tmp11 + tmp22 tmp24 = tmp0 + tmp23 tmp25 = 1e-12 tmp26 = triton_helpers.maximum(tmp24, tmp25) tmp27 = libdevice.sqrt(tmp26) tmp30 = tmp28 == tmp29 tmp31 = tmp30.to(tl.float32) tmp32 = 1.0 tmp33 = tmp32 - tmp31 tmp34 = -9999999.0 tmp35 = tmp33 * tmp34 tmp36 = tmp27 + tmp35 tmp37 = r1 tmp38 = tmp37.to(tl.int16) tmp39 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp40 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK]) tmp41, tmp42, = 
triton_helpers.sort_with_index(tmp39, tmp40, None, 1, stable=False, descending=True) tmp43 = 9999999.0 tmp44 = tmp31 * tmp43 tmp45 = tmp27 + tmp44 tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK]) tmp47, tmp48, = triton_helpers.sort_with_index(tmp46, tmp40, None, 1, stable=False, descending=False) tl.store(in_out_ptr0 + (r1 + (4*x0)), tmp24, xmask) tl.store(out_ptr0 + (r1 + (4*x0)), tmp41, xmask) tl.store(out_ptr1 + (r1 + (4*x0)), tmp47, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/m6/cm6aazyadxuwi7w5qulouiibqrpqbbh4csdttovffshbwspavgqj.py # Topologically Sorted Source Nodes: [loss, gt, sum_3, mul_2, prec], Original ATen: [aten.neg, aten.sub, aten.mul, aten.add, aten.clamp_min, aten.mean, aten.gt, aten.sum, aten.div] # Source node to ATen node mapping: # gt => gt # loss => add_3, clamp_min_1, full_default, mean, mul_2, sub_1 # mul_2 => mul_3 # prec => div # sum_3 => sum_3 # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_1, %select), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%full_default, %sub_1), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Scalar](args = (%mul_2, 4.0), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_3, 0), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%clamp_min_1,), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Tensor](args = (%select_1, %select), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%gt,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_3, 1.0), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_3, 4), kwargs = {}) triton_per_fused_add_clamp_min_div_gt_mean_mul_neg_sub_sum_1 = async_compile.triton('triton_per_fused_add_clamp_min_div_gt_mean_mul_neg_sub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_min_div_gt_mean_mul_neg_sub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_clamp_min_div_gt_mean_mul_neg_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = -1.0 tmp4 = tmp3 * tmp2 tmp5 = 4.0 tmp6 = tmp4 + tmp5 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.sum(tmp9, 1)[:, None] tmp12 = tmp0 > tmp1 tmp13 = tmp12.to(tl.int64) tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.sum(tmp14, 1)[:, None] tmp17 = tmp16.to(tl.float32) tmp18 = 1.0 tmp19 = tmp17 * tmp18 tmp20 = 0.25 tmp21 = tmp19 * tmp20 tmp22 = tmp11 / tmp5 tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp21, None) tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp22, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(arg0_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [dist, clamp, dist_1, eq, mat_sim, sub, mul, add_1, sort, mul_1, add_2, sort_1], Original ATen: [aten.add, aten.clamp, aten.sqrt, aten.eq, aten._to_copy, aten.rsub, aten.mul, aten.sort] stream0 = get_raw_stream(0) triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_0.run(buf1, arg0_1, arg1_1, buf2, buf4, 4, 4, grid=grid(4), stream=stream0) del arg0_1 del arg1_1 del buf1 buf6 = empty_strided_cuda((), (), torch.float32) buf9 = empty_strided_cuda((), (), torch.float32) buf8 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [loss, gt, sum_3, mul_2, prec], Original ATen: [aten.neg, aten.sub, aten.mul, aten.add, aten.clamp_min, aten.mean, aten.gt, aten.sum, aten.div] triton_per_fused_add_clamp_min_div_gt_mean_mul_neg_sub_sum_1.run(buf8, buf4, buf2, buf9, 1, 4, grid=grid(1), stream=stream0) del buf2 del buf4 return (buf8, buf9, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import * from torch.optim.lr_scheduler import * def _batch_hard(mat_distance, mat_similarity, indice=False): sorted_mat_distance, positive_indices = torch.sort(mat_distance + - 9999999.0 * (1 - mat_similarity), dim=1, descending=True) hard_p = sorted_mat_distance[:, 0] hard_p_indice = positive_indices[:, 0] sorted_mat_distance, negative_indices = torch.sort(mat_distance + 9999999.0 * mat_similarity, dim=1, descending=False) hard_n = sorted_mat_distance[:, 0] hard_n_indice = negative_indices[:, 0] if indice: return hard_p, hard_n, hard_p_indice, hard_n_indice return hard_p, hard_n def euclidean_dist(x, y): m, n = x.size(0), y.size(0) xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n) yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t() dist = xx + yy dist.addmm_(1, -2, x, y.t()) dist = dist.clamp(min=1e-12).sqrt() return dist class TripletLoss(nn.Module): """ Compute Triplet loss augmented with Batch Hard Details can be seen in 'In defense of the Triplet Loss for Person Re-Identification' """ def __init__(self, margin, normalize_feature=False): super(TripletLoss, self).__init__() self.margin = margin self.normalize_feature = normalize_feature self.margin_loss = nn.MarginRankingLoss(margin=margin) def forward(self, emb, label): if self.normalize_feature: emb = F.normalize(emb) mat_dist = euclidean_dist(emb, emb) assert mat_dist.size(0) == mat_dist.size(1) N = mat_dist.size(0) mat_sim = label.expand(N, N).eq(label.expand(N, N).t()).float() dist_ap, dist_an = _batch_hard(mat_dist, mat_sim) assert dist_an.size(0) == dist_ap.size(0) y = torch.ones_like(dist_ap) loss = self.margin_loss(dist_an, dist_ap, y) prec = (dist_an.data > dist_ap.data).sum() * 1.0 / y.size(0) return loss, prec def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'margin': 4}]
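[Editor's note] dist.addmm_(1, -2, x, y.t()) in euclidean_dist uses the long-deprecated positional beta/alpha overload, which recent PyTorch releases reject. A sketch of the same computation, dist = sqrt(clamp(||x||^2 + ||y||^2 - 2 x y^T, 1e-12)), written with the keyword form:

import torch

def euclidean_dist_modern(x, y):
    # squared-distance matrix via ||x||^2 + ||y||^2 - 2 * x @ y.T
    m, n = x.size(0), y.size(0)
    xx = x.pow(2).sum(1, keepdim=True).expand(m, n)
    yy = y.pow(2).sum(1, keepdim=True).expand(n, m).t()
    dist = xx + yy
    dist.addmm_(x, y.t(), beta=1, alpha=-2)  # keyword form of the deprecated call
    return dist.clamp(min=1e-12).sqrt()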
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn import * from torch.optim.lr_scheduler import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr ): xnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_out_ptr0 + (r1 + 4 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + 4 * r1, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (1 + 4 * r1), None, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (2 + 4 * r1), None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr0 + (3 + 4 * r1), None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (r1 + 4 * x0), xmask, other=0.0) tmp29 = tl.load(in_ptr1 + (x0 + 4 * r1), xmask, other=0.0) tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp13 = tmp12 * tmp12 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp23 = tmp11 + tmp22 tmp24 = tmp0 + tmp23 tmp25 = 1e-12 tmp26 = triton_helpers.maximum(tmp24, tmp25) tmp27 = libdevice.sqrt(tmp26) tmp30 = tmp28 == tmp29 tmp31 = tmp30.to(tl.float32) tmp32 = 1.0 tmp33 = tmp32 - tmp31 tmp34 = -9999999.0 tmp35 = tmp33 * tmp34 tmp36 = tmp27 + tmp35 tmp37 = r1 tmp38 = tmp37.to(tl.int16) tmp39 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp40 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK]) tmp41, _tmp42 = triton_helpers.sort_with_index(tmp39, tmp40, None, 1, stable=False, descending=True) tmp43 = 9999999.0 tmp44 = tmp31 * tmp43 tmp45 = tmp27 + tmp44 tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK]) tmp47, _tmp48 = triton_helpers.sort_with_index(tmp46, tmp40, None, 1, stable=False, descending=False) tl.store(in_out_ptr0 + (r1 + 4 * x0), tmp24, xmask) tl.store(out_ptr0 + (r1 + 4 * x0), tmp41, xmask) tl.store(out_ptr1 + (r1 + 4 * x0), tmp47, xmask) @triton.jit def triton_per_fused_add_clamp_min_div_gt_mean_mul_neg_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = -1.0 
    tmp4 = tmp3 * tmp2
    tmp5 = 4.0
    tmp6 = tmp4 + tmp5
    tmp7 = 0.0
    tmp8 = triton_helpers.maximum(tmp6, tmp7)
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
    tmp11 = tl.sum(tmp9, 1)[:, None]
    tmp12 = tmp0 > tmp1
    tmp13 = tmp12.to(tl.int64)
    tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
    tmp16 = tl.sum(tmp14, 1)[:, None]
    tmp17 = tmp16.to(tl.float32)
    tmp18 = 1.0
    tmp19 = tmp17 * tmp18
    tmp20 = 0.25
    tmp21 = tmp19 * tmp20
    tmp22 = tmp11 / tmp5
    tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp21, None)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp22, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(arg0_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4
            ), 0), out=buf0)
        buf1 = buf0
        del buf0
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_0[grid(4)](
            buf1, arg0_1, arg1_1, buf2, buf4, 4, 4, XBLOCK=1, num_warps=2,
            num_stages=1)
        del arg0_1
        del arg1_1
        del buf1
        buf6 = empty_strided_cuda((), (), torch.float32)
        buf9 = empty_strided_cuda((), (), torch.float32)
        buf8 = buf6
        del buf6
        triton_per_fused_add_clamp_min_div_gt_mean_mul_neg_sub_sum_1[grid(1)](
            buf8, buf4, buf2, buf9, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
        del buf2
        del buf4
    return buf8, buf9


def _batch_hard(mat_distance, mat_similarity, indice=False):
    sorted_mat_distance, positive_indices = torch.sort(mat_distance + -
        9999999.0 * (1 - mat_similarity), dim=1, descending=True)
    hard_p = sorted_mat_distance[:, 0]
    hard_p_indice = positive_indices[:, 0]
    sorted_mat_distance, negative_indices = torch.sort(mat_distance +
        9999999.0 * mat_similarity, dim=1, descending=False)
    hard_n = sorted_mat_distance[:, 0]
    hard_n_indice = negative_indices[:, 0]
    if indice:
        return hard_p, hard_n, hard_p_indice, hard_n_indice
    return hard_p, hard_n


def euclidean_dist(x, y):
    m, n = x.size(0), y.size(0)
    xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
    yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
    dist = xx + yy
    # keyword form of the in-place addmm_; the positional (beta, alpha,
    # mat1, mat2) overload no longer exists in current PyTorch
    dist.addmm_(x, y.t(), beta=1, alpha=-2)
    dist = dist.clamp(min=1e-12).sqrt()
    return dist


class TripletLossNew(nn.Module):
    """
    Compute Triplet loss augmented with Batch Hard
    Details can be seen in 'In defense of the Triplet Loss for Person Re-Identification'
    """

    def __init__(self, margin, normalize_feature=False):
        super(TripletLossNew, self).__init__()
        self.margin = margin
        self.normalize_feature = normalize_feature
        self.margin_loss = nn.MarginRankingLoss(margin=margin)

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0], output[1]
WangWenhao0716/DomainMix
TripletLoss
false
18059
[ "MIT" ]
8
2d9a20c1536177d1d71fbdc99f714eaf98fdfe92
https://github.com/WangWenhao0716/DomainMix/tree/2d9a20c1536177d1d71fbdc99f714eaf98fdfe92
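A minimal eager-mode usage sketch for the TripletLoss module above (shapes follow its get_inputs helper; the two-identity label vector is an illustrative assumption, not part of the dataset row):

import torch

criterion = TripletLoss(margin=4)           # margin as in get_init_inputs
emb = torch.rand(4, 4)                      # 4 embeddings of dimension 4
label = torch.tensor([0.0, 0.0, 1.0, 1.0])  # assumed: two identities, two samples each
loss, prec = criterion(emb, label)          # scalar ranking loss + batch-hard precision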
PatchEmbed3D
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/jg/cjgyjqibnlbg7vprz7xz5qxjgczfy4fjlm7e6p5yctiggef4rpaj.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [2, 4, 4], [0, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3145728 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 8192) % 96 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = 
tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64, 64), (786432, 262144, 4096, 64, 1)) assert_size_stride(primals_2, (96, 3, 2, 4, 4), (96, 32, 16, 4, 1)) assert_size_stride(primals_3, (96, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2, 4, 4), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 96, 32, 16, 16), (786432, 8192, 256, 16, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_3, 3145728, grid=grid(3145728), stream=stream0) del primals_3 return (buf1, primals_1, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 3, 64, 64, 64), (786432, 262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((96, 3, 2, 4, 4), (96, 32, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn class PatchEmbed3D(nn.Module): """ Video to Patch Embedding. Args: patch_size (int): Patch token size. Default: (2,4,4). in_chans (int): Number of input video channels. Default: 3. embed_dim (int): Number of linear projection output channels. Default: 96. norm_layer (nn.Module, optional): Normalization layer. Default: None """ def __init__(self, patch_size=(2, 4, 4), in_chans=3, embed_dim=96, norm_layer=None): super().__init__() self.patch_size = patch_size self.in_chans = in_chans self.embed_dim = embed_dim self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) if norm_layer is not None: self.norm = norm_layer(embed_dim) else: self.norm = None def forward(self, x): """Forward function.""" _, _, D, H, W = x.size() if W % self.patch_size[2] != 0: x = F.pad(x, (0, self.patch_size[2] - W % self.patch_size[2])) if H % self.patch_size[1] != 0: x = F.pad(x, (0, 0, 0, self.patch_size[1] - H % self.patch_size[1]) ) if D % self.patch_size[0] != 0: x = F.pad(x, (0, 0, 0, 0, 0, self.patch_size[0] - D % self. patch_size[0])) x = self.proj(x) if self.norm is not None: D, Wh, Ww = x.size(2), x.size(3), x.size(4) x = x.flatten(2).transpose(1, 2) x = self.norm(x) x = x.transpose(1, 2).view(-1, self.embed_dim, D, Wh, Ww) return x def get_inputs(): return [torch.rand([4, 3, 64, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 8192 % 96 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64, 64), (786432, 262144, 4096, 64, 1)) assert_size_stride(primals_2, (96, 3, 2, 4, 4), (96, 32, 16, 4, 1)) assert_size_stride(primals_3, (96,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2, 4, 4), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 96, 32, 16, 16), (786432, 8192, 256, 16, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(3145728)](buf1, primals_3, 3145728, XBLOCK=1024, num_warps=4, num_stages=1) del primals_3 return buf1, primals_1, primals_2 class PatchEmbed3DNew(nn.Module): """ Video to Patch Embedding. Args: patch_size (int): Patch token size. Default: (2,4,4). in_chans (int): Number of input video channels. Default: 3. embed_dim (int): Number of linear projection output channels. Default: 96. norm_layer (nn.Module, optional): Normalization layer. Default: None """ def __init__(self, patch_size=(2, 4, 4), in_chans=3, embed_dim=96, norm_layer=None): super().__init__() self.patch_size = patch_size self.in_chans = in_chans self.embed_dim = embed_dim self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) if norm_layer is not None: self.norm = norm_layer(embed_dim) else: self.norm = None def forward(self, input_0): primals_2 = self.proj.weight primals_3 = self.proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Viditagarwal7479/Video-Swin-Transformer
PatchEmbed3D
false
18060
[ "Apache-2.0" ]
9
37910ef3141c7b2eef76544f9ec8bdf26ec94c7d
https://github.com/Viditagarwal7479/Video-Swin-Transformer/tree/37910ef3141c7b2eef76544f9ec8bdf26ec94c7d
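A minimal usage sketch for the PatchEmbed3D module above (the input shape mirrors its get_inputs helper; the patch size is the class default):

import torch

embed = PatchEmbed3D(patch_size=(2, 4, 4), in_chans=3, embed_dim=96)
video = torch.rand(4, 3, 64, 64, 64)        # (B, C, D, H, W)
tokens = embed(video)                       # -> torch.Size([4, 96, 32, 16, 16])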
RobertaClassificationHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/qs/cqsgbydauqwdluexymrszxq7nqjh3tcbqqiq5ccrsj74pb354tqf.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x_2 => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class RobertaClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) classifier_dropout = (config.classifier_dropout if config. classifier_dropout is not None else config.hidden_dropout_prob) self.dropout = nn.Dropout(classifier_dropout) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, x, **kwargs): x = self.dropout(x) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) x = self.out_proj(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, classifier_dropout= 0.5, num_labels=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](buf1, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, primals_4 class RobertaClassificationHeadNew(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) classifier_dropout = (config.classifier_dropout if config. classifier_dropout is not None else config.hidden_dropout_prob) self.dropout = nn.Dropout(classifier_dropout) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, input_0): primals_2 = self.dense.weight primals_3 = self.dense.bias primals_4 = self.out_proj.weight primals_5 = self.out_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Amber-Chaeeunk/Open-Domain-Question-Answering
RobertaClassificationHead
false
18061
[ "MIT" ]
5
725e369a4409c54bf11bcfb9db53865d8fc1f935
https://github.com/Amber-Chaeeunk/Open-Domain-Question-Answering/tree/725e369a4409c54bf11bcfb9db53865d8fc1f935
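A minimal usage sketch for the RobertaClassificationHead module above; SimpleNamespace stands in for the _mock_config helper used by get_init_inputs:

import torch
from types import SimpleNamespace

config = SimpleNamespace(hidden_size=4, classifier_dropout=0.5, num_labels=4)
head = RobertaClassificationHead(config)
head.eval()                                 # disable dropout for a repeatable pass
logits = head(torch.rand(4, 4, 4, 4))       # -> torch.Size([4, 4, 4, 4])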
TemporallyBatchedAdditiveAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/64/c64zju5wtw4hvt42laa4lncy5hejt5mn5g7asuqeuhha6tiwedug.py # Topologically Sorted Source Nodes: [add, tanh], Original ATen: [aten.add, aten.tanh] # Source node to ATen node mapping: # add => add # tanh => tanh # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %unsqueeze), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {}) triton_poi_fused_add_tanh_0 = async_compile.triton('triton_poi_fused_add_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 256 x0 = xindex % 64 x2 = (xindex // 256) x4 = xindex tmp0 = 
tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(out_ptr0 + (x4), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/4x/c4xd6y4gkp7z3srq6gzq52swaegpimvl35zpaduo4j5wyernpskh.py # Topologically Sorted Source Nodes: [attention_probs], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_probs => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x3), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/mi/cmibf5zezxd6g5fvwgrxm77t4io4cybzrauehr6ghekpfqjr2jwl.py # Topologically Sorted Source Nodes: [attention_probs], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_probs => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = 
(%exp, [1], True), kwargs = {}) # %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/wq/cwqvg6epim6vuptbvchbcfezwg433jnvdexvu6at7an7rhvlobs2.py # Topologically Sorted Source Nodes: [mul, final_context_vec], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # final_context_vec => sum_2 # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_mul_sum_3 = async_compile.triton('triton_poi_fused_mul_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, 
regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 16 x2 = (xindex // 64) x3 = xindex % 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x3), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (64 + x3), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (32 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (128 + x3), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (192 + x3), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + (x4), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, tanh], Original ATen: [aten.add, aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_add_tanh_0.run(buf0, buf1, buf2, 1024, grid=grid(1024), stream=stream0) buf3 = reinterpret_tensor(buf1, (256, 1), (1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [score_vec], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf2, (256, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf3) buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4, 1), (64, 16, 
4, 1, 256), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [attention_probs], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf3, buf4, 256, grid=grid(256), stream=stream0) buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [attention_probs], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf4, buf5, 256, grid=grid(256), stream=stream0) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [mul, final_context_vec], Original ATen: [aten.mul, aten.sum] triton_poi_fused_mul_sum_3.run(buf5, primals_2, buf6, 256, grid=grid(256), stream=stream0) return (buf6, reinterpret_tensor(buf5, (4, 4, 4, 4, 1), (64, 4, 16, 1, 1), 0), primals_2, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), buf2, buf5, primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class AdditiveAttention(nn.Module): def __init__(self, encoder_hidden_state_dim, decoder_hidden_state_dim, internal_dim=None): super(AdditiveAttention, self).__init__() if internal_dim is None: internal_dim = int((encoder_hidden_state_dim + decoder_hidden_state_dim) / 2) self.w1 = nn.Linear(encoder_hidden_state_dim, internal_dim, bias=False) self.w2 = nn.Linear(decoder_hidden_state_dim, internal_dim, bias=False) self.v = nn.Linear(internal_dim, 1, bias=False) def score(self, encoder_state, decoder_state): return self.v(torch.tanh(self.w1(encoder_state) + self.w2( decoder_state))) def forward(self, encoder_states, decoder_state): score_vec = torch.cat([self.score(encoder_states[:, i], decoder_state) for i in range(encoder_states.shape[1])], dim=1) attention_probs = torch.unsqueeze(F.softmax(score_vec, dim=1), dim=2) final_context_vec = torch.sum(attention_probs * encoder_states, dim=1) return final_context_vec, attention_probs class TemporallyBatchedAdditiveAttention(AdditiveAttention): def __init__(self, encoder_hidden_state_dim, decoder_hidden_state_dim, internal_dim=None): super(TemporallyBatchedAdditiveAttention, self).__init__( encoder_hidden_state_dim, decoder_hidden_state_dim, internal_dim) def score(self, encoder_state, decoder_state): return self.v(torch.tanh(self.w1(encoder_state) + torch.unsqueeze( self.w2(decoder_state), dim=1))) def forward(self, encoder_states, decoder_state): score_vec = self.score(encoder_states, decoder_state) attention_probs = F.softmax(score_vec, dim=1) final_context_vec = torch.sum(attention_probs * encoder_states, dim=1) return final_context_vec, torch.squeeze(torch.transpose( attention_probs, 1, 2), dim=3) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'encoder_hidden_state_dim': 4, 'decoder_hidden_state_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 256 x0 = xindex % 64 x2 = xindex // 256 x4 = xindex tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(out_ptr0 + x4, tmp3, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 16 x2 = xindex // 64 x3 = xindex % 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr1 + (64 + x3), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (32 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr1 + (128 + x3), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (48 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp12 = 
tl.load(in_ptr1 + (192 + x3), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x4, tmp14, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1) del primals_3 buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_tanh_0[grid(1024)](buf0, buf1, buf2, 1024, XBLOCK=128, num_warps=4, num_stages=1) buf3 = reinterpret_tensor(buf1, (256, 1), (1, 1), 0) del buf1 extern_kernels.mm(reinterpret_tensor(buf2, (256, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf3) buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0 ) del buf0 triton_poi_fused__softmax_1[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0) del buf3 triton_poi_fused__softmax_2[grid(256)](buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused_mul_sum_3[grid(256)](buf5, primals_2, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf6, reinterpret_tensor(buf5, (4, 4, 4, 4, 1), (64, 4, 16, 1, 1), 0 ), primals_2, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0 ), buf2, buf5, primals_5 class AdditiveAttention(nn.Module): def __init__(self, encoder_hidden_state_dim, decoder_hidden_state_dim, internal_dim=None): super(AdditiveAttention, self).__init__() if internal_dim is None: internal_dim = int((encoder_hidden_state_dim + decoder_hidden_state_dim) / 2) self.w1 = nn.Linear(encoder_hidden_state_dim, internal_dim, bias=False) self.w2 = nn.Linear(decoder_hidden_state_dim, internal_dim, bias=False) self.v = nn.Linear(internal_dim, 1, bias=False) def score(self, encoder_state, decoder_state): return self.v(torch.tanh(self.w1(encoder_state) + self.w2( decoder_state))) def forward(self, encoder_states, decoder_state): score_vec = torch.cat([self.score(encoder_states[:, i], decoder_state) for i in range(encoder_states.shape[1])], dim=1) attention_probs = torch.unsqueeze(F.softmax(score_vec, dim=1), dim=2) final_context_vec = torch.sum(attention_probs * encoder_states, dim=1) return final_context_vec, attention_probs class TemporallyBatchedAdditiveAttentionNew(AdditiveAttention): def __init__(self, encoder_hidden_state_dim, decoder_hidden_state_dim, internal_dim=None): super(TemporallyBatchedAdditiveAttentionNew, self).__init__( encoder_hidden_state_dim, decoder_hidden_state_dim, internal_dim) def score(self, encoder_state, decoder_state): return self.v(torch.tanh(self.w1(encoder_state) + torch.unsqueeze( self.w2(decoder_state), dim=1))) def forward(self, input_0, input_1): primals_1 = 
self.w1.weight primals_3 = self.w2.weight primals_5 = self.v.weight primals_2 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
Vision-CAIR/HalentNet
TemporallyBatchedAdditiveAttention
false
18062
[ "MIT" ]
4
dedef73c57c63aa580fc497fa42d512f4241a64b
https://github.com/Vision-CAIR/HalentNet/tree/dedef73c57c63aa580fc497fa42d512f4241a64b
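A minimal usage sketch for the TemporallyBatchedAdditiveAttention module above (both input shapes mirror its get_inputs helper):

import torch

attn = TemporallyBatchedAdditiveAttention(encoder_hidden_state_dim=4,
    decoder_hidden_state_dim=4)
encoder_states = torch.rand(4, 4, 4, 4)
decoder_state = torch.rand(4, 4, 4, 4)
context, probs = attn(encoder_states, decoder_state)
# context: torch.Size([4, 4, 4, 4]); probs holds the transposed softmax weights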
FocalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/hj/chjfyblsxig2iqn5c23z34sevqdunbl22uyh5h4xygjslw5n4bsl.py # Topologically Sorted Source Nodes: [mul_3, sub_3, mul_4, alpha_t, ce_loss, p, mul, sub, sub_1, mul_1, p_t, sub_2, pow_1, loss, loss_1], Original ATen: [aten.mul, aten.rsub, aten.add, aten.binary_cross_entropy_with_logits, aten.sigmoid, aten.pow] # Source node to ATen node mapping: # alpha_t => add_1 # ce_loss => abs_1, exp, full_default, log1p, minimum, mul, neg, sub, sub_1, sub_2 # loss => mul_3 # loss_1 => mul_6 # mul => mul_1 # mul_1 => mul_2 # mul_3 => mul_4 # mul_4 => mul_5 # p => sigmoid # p_t => add # pow_1 => pow_1 # sub => sub_3 # sub_1 => sub_4 # sub_2 => sub_5 # sub_3 => sub_6 # Graph fragment: # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 0.25), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, 0.75), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %mul_5), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg0_1), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg0_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %sub_2 : [num_users=1] = 
call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %arg1_1), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %sub_4), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %add), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_5, 2), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %pow_1), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, %mul_3), kwargs = {}) triton_poi_fused_add_binary_cross_entropy_with_logits_mul_pow_rsub_sigmoid_0 = async_compile.triton('triton_poi_fused_add_binary_cross_entropy_with_logits_mul_pow_rsub_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_binary_cross_entropy_with_logits_mul_pow_rsub_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_binary_cross_entropy_with_logits_mul_pow_rsub_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp8 = tl.load(in_ptr1 + (x0), xmask) tmp1 = 0.25 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp3 - tmp0 tmp5 = 0.75 tmp6 = tmp4 * tmp5 tmp7 = tmp2 + tmp6 tmp9 = tmp4 * tmp8 tmp10 = 0.0 tmp11 = triton_helpers.minimum(tmp10, tmp8) tmp12 = tl_math.abs(tmp8) tmp13 = -tmp12 tmp14 = tl_math.exp(tmp13) tmp15 = libdevice.log1p(tmp14) tmp16 = tmp11 - tmp15 tmp17 = tmp9 - tmp16 tmp18 = tl.sigmoid(tmp8) tmp19 = 
tmp18 * tmp0 tmp20 = tmp3 - tmp18 tmp21 = tmp20 * tmp4 tmp22 = tmp19 + tmp21 tmp23 = tmp3 - tmp22 tmp24 = tmp23 * tmp23 tmp25 = tmp17 * tmp24 tmp26 = tmp7 * tmp25 tl.store(out_ptr0 + (x0), tmp26, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_3, sub_3, mul_4, alpha_t, ce_loss, p, mul, sub, sub_1, mul_1, p_t, sub_2, pow_1, loss, loss_1], Original ATen: [aten.mul, aten.rsub, aten.add, aten.binary_cross_entropy_with_logits, aten.sigmoid, aten.pow] stream0 = get_raw_stream(0) triton_poi_fused_add_binary_cross_entropy_with_logits_mul_pow_rsub_sigmoid_0.run(arg1_1, arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class FocalLoss(nn.Module):

    def __init__(self, alpha: 'float'=0.25, gamma: 'float'=2, reduction:
        'str'='none'):
        """
        Original implementation from https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/focal_loss.py .
        Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
        Args:
            inputs: A float tensor of arbitrary shape.
                    The predictions for each example.
            targets: A float tensor with the same shape as inputs. Stores the binary
                     classification label for each element in inputs
                     (0 for the negative class and 1 for the positive class).
            alpha: (optional) Weighting factor in range (0,1) to balance
                   positive vs negative examples or -1 for ignore. Default = 0.25
            gamma: Exponent of the modulating factor (1 - p_t) to
                   balance easy vs hard examples.
            reduction: 'none' | 'mean' | 'sum'
                       'none': No reduction will be applied to the output.
                       'mean': The output will be averaged.
                       'sum': The output will be summed.
        Returns:
            Loss tensor with the reduction option applied.
        """
        super().__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction

    def forward(self, inputs, targets):
        p = torch.sigmoid(inputs)
        ce_loss = F.binary_cross_entropy_with_logits(inputs, targets,
            reduction='none')
        p_t = p * targets + (1 - p) * (1 - targets)
        loss = ce_loss * (1 - p_t) ** self.gamma
        if self.alpha >= 0:
            alpha_t = self.alpha * targets + (1 - self.alpha) * (1 - targets)
            loss = alpha_t * loss
        # apply the documented reduction: 'none' | 'mean' | 'sum'
        if self.reduction == 'mean':
            return loss.mean()
        if self.reduction == 'sum':
            return loss.sum()
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_binary_cross_entropy_with_logits_mul_pow_rsub_sigmoid_0( in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp8 = tl.load(in_ptr1 + x0, xmask) tmp1 = 0.25 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp3 - tmp0 tmp5 = 0.75 tmp6 = tmp4 * tmp5 tmp7 = tmp2 + tmp6 tmp9 = tmp4 * tmp8 tmp10 = 0.0 tmp11 = triton_helpers.minimum(tmp10, tmp8) tmp12 = tl_math.abs(tmp8) tmp13 = -tmp12 tmp14 = tl_math.exp(tmp13) tmp15 = libdevice.log1p(tmp14) tmp16 = tmp11 - tmp15 tmp17 = tmp9 - tmp16 tmp18 = tl.sigmoid(tmp8) tmp19 = tmp18 * tmp0 tmp20 = tmp3 - tmp18 tmp21 = tmp20 * tmp4 tmp22 = tmp19 + tmp21 tmp23 = tmp3 - tmp22 tmp24 = tmp23 * tmp23 tmp25 = tmp17 * tmp24 tmp26 = tmp7 * tmp25 tl.store(out_ptr0 + x0, tmp26, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_binary_cross_entropy_with_logits_mul_pow_rsub_sigmoid_0[ grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class FocalLossNew(nn.Module): def __init__(self, alpha: 'float'=0.25, gamma: 'float'=2, reduction: 'str'='none'): """ Original implementation from https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/focal_loss.py . Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). alpha: (optional) Weighting factor in range (0,1) to balance positive vs negative examples or -1 for ignore. Default = 0.25 gamma: Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. reduction: 'none' | 'mean' | 'sum' 'none': No reduction will be applied to the output. 'mean': The output will be averaged. 'sum': The output will be summed. Returns: Loss tensor with the reduction option applied. """ super().__init__() self.alpha = alpha self.gamma = gamma self.reduction = reduction def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
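# --- Editor's note: parity between the eager and fused versions (a sketch;
# assumes a CUDA device and the eager FocalLoss defined above). The Triton
# kernel hard-codes alpha=0.25 and gamma=2 and always emits the unreduced
# (reduction='none') loss -- FocalLossNew.forward never applies
# self.reduction -- so the comparison is against that configuration only.
def _check_focal_parity():
    inputs = torch.rand(4, 4, 4, 4, device='cuda')
    targets = torch.rand(4, 4, 4, 4, device='cuda')
    eager = FocalLoss(alpha=0.25, gamma=2, reduction='none')
    fused = FocalLossNew()
    assert torch.allclose(eager(inputs, targets), fused(inputs, targets),
        atol=1e-05)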
VisualJoyce/ChengyuBERT
FocalLoss
false
18,063
[ "MIT" ]
8
605db3a4b3241dd4d02baa41a68bf23b5b00b36d
https://github.com/VisualJoyce/ChengyuBERT/tree/605db3a4b3241dd4d02baa41a68bf23b5b00b36d
BMNLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/hm/chm7gzfihlw7kmzie4b2vyg3qfankkq2lqnx6llyvimlqimf45w6.py # Topologically Sorted Source Nodes: [gt_6, pmask_1, sum_9, num_positive_1, ratio_2, clamp_3, ratio_3, coef_3, mul_18, add_6, log_2, mul_19, mul_16, sub_4, coef_2, sub_5, mul_20, sub_6, add_7, log_3, mul_21, loss_3, mean, loss_4, gt_7, pmask_2, sum_10, num_positive_2, ratio_4, clamp_6, ratio_5, coef_5, mul_24, add_9, log_4, mul_25, mul_22, sub_7, coef_4, sub_8, mul_26, sub_9, add_10, log_5, mul_27, loss_5, mean_1, loss_6, loss_7, mul_28, pred_bm_reg, gt_iou_map, le, gt_1, and_, u_mmask, u_smmask_1, gt, u_hmask, num_h, num_m, r_m, sub, gt_3, u_smmask_2, add, le_1, gt_2, and__1, u_lmask, u_lmask_1, u_slmask_1, num_l, r_l, sub_1, gt_4, u_slmask_2, weights, mul_4, mul_5, loss, ones_like, mul_6, sum_4, mul_7, sum_5, loss_1, mul_29, add_13, gt_5, pmask, sum_6, num_positive, le_2, nmask, nmask_1, sum_7, num_entries, ratio, ratio_1, coef_1, pred_bm_cls, add_3, log, mul_11, loss_pos, mul_9, sub_2, coef_0, sub_3, add_4, log_1, mul_13, loss_neg, add_5, sum_8, mul_15, loss_2, mul_30, loss_8], Original ATen: [aten.gt, aten._to_copy, aten.sum, aten.clamp, aten.reciprocal, aten.mul, aten.add, aten.log, aten.sub, aten.div, aten.rsub, aten.mean, aten.neg, aten.clone, aten.le, aten.bitwise_and, aten.mse_loss, aten.ones_like] # Source node to ATen node mapping: # add => add # add_10 => add_10 # add_13 => add_13 # add_3 => add_3 # add_4 => add_4 # add_5 => add_5 # add_6 => add_6 # add_7 => add_7 # add_9 => add_9 # and_ => bitwise_and # and__1 => bitwise_and_1 # clamp_3 => clamp_min_3 # clamp_6 => clamp_min_5 # coef_0 => div_4 # coef_1 => mul_10 # coef_2 => div_6 # coef_3 => mul_18 # coef_4 => div_7 # coef_5 => mul_25 # gt => gt # gt_1 => gt_1 # gt_2 => gt_2 # gt_3 => gt_3 # gt_4 => gt_4 # gt_5 => gt_5 # gt_6 => gt_6 # gt_7 => gt_7 # gt_iou_map => mul # le => le # le_1 => le_1 # le_2 => le_2 # log => log # log_1 => log_1 # log_2 => log_2 # log_3 => log_3 # log_4 => log_4 # log_5 => log_5 # loss => mean, pow_1, sub_2 # loss_1 => div_2 # loss_2 => div_5 # loss_3 => add_8 # loss_4 => neg # loss_5 => add_11 # loss_6 => neg_1 # loss_7 => add_12 # loss_8 => add_14 # loss_neg => mul_14 # loss_pos => mul_12 # mean => mean_1 # mean_1 => mean_2 # mul_11 => mul_11 # 
mul_13 => mul_13 # mul_15 => mul_15 # mul_16 => mul_17 # mul_18 => mul_19 # mul_19 => mul_20 # mul_20 => mul_21 # mul_21 => mul_22 # mul_22 => mul_24 # mul_24 => mul_26 # mul_25 => mul_27 # mul_26 => mul_28 # mul_27 => mul_29 # mul_28 => mul_30 # mul_29 => mul_31 # mul_30 => mul_32 # mul_4 => mul_4 # mul_5 => mul_5 # mul_6 => mul_6 # mul_7 => mul_7 # mul_9 => mul_9 # nmask => convert_element_type_6 # nmask_1 => mul_8 # num_entries => add_2 # num_h => sum_1 # num_l => sum_3 # num_m => sum_2 # num_positive => clamp_min # num_positive_1 => clamp_min_2 # num_positive_2 => clamp_min_4 # ones_like => full_default # pmask => convert_element_type_5 # pmask_1 => convert_element_type_7 # pmask_2 => convert_element_type_8 # pred_bm_cls => clone_1 # pred_bm_reg => clone # r_l => div_1 # r_m => div # ratio => div_3 # ratio_1 => clamp_max, clamp_min_1 # ratio_2 => mul_16, reciprocal # ratio_3 => clamp_max_1 # ratio_4 => mul_23, reciprocal_1 # ratio_5 => clamp_max_2 # sub => sub # sub_1 => sub_1 # sub_2 => sub_3 # sub_3 => sub_4 # sub_4 => sub_5 # sub_5 => sub_6 # sub_6 => sub_7 # sub_7 => sub_8 # sub_8 => sub_9 # sub_9 => sub_10 # sum_10 => sum_10 # sum_4 => sum_4 # sum_5 => sum_5 # sum_6 => sum_6 # sum_7 => sum_7 # sum_8 => sum_8 # sum_9 => sum_9 # u_hmask => convert_element_type # u_lmask => convert_element_type_2 # u_lmask_1 => mul_1 # u_mmask => convert_element_type_1 # u_slmask_1 => mul_3 # u_slmask_2 => convert_element_type_4 # u_smmask_1 => mul_2 # u_smmask_2 => convert_element_type_3 # weights => add_1 # Graph fragment: # %gt_6 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view, 0.5), kwargs = {}) # %convert_element_type_7 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%gt_6, torch.float32), kwargs = {}) # %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%convert_element_type_7,), kwargs = {}) # %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sum_9, 1), kwargs = {}) # %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%clamp_min_2,), kwargs = {}) # %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 256), kwargs = {}) # %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_16, 1.05), kwargs = {}) # %clamp_max_1 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 21), kwargs = {}) # %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max_1, 0.5), kwargs = {}) # %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_18, %convert_element_type_7), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, 1e-05), kwargs = {}) # %log_2 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_6,), kwargs = {}) # %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_19, %log_2), kwargs = {}) # %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max_1, 0.5), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_max_1, 1), kwargs = {}) # %div_6 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_17, %sub_5), kwargs = {}) # %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = 
(1.0, %convert_element_type_7), kwargs = {}) # %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_6, %sub_6), kwargs = {}) # %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %view_1), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_7, 1e-05), kwargs = {}) # %log_3 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_7,), kwargs = {}) # %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_21, %log_3), kwargs = {}) # %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_20, %mul_22), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add_8,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_1,), kwargs = {}) # %gt_7 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_2, 0.5), kwargs = {}) # %convert_element_type_8 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%gt_7, torch.float32), kwargs = {}) # %sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%convert_element_type_8,), kwargs = {}) # %clamp_min_4 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sum_10, 1), kwargs = {}) # %reciprocal_1 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%clamp_min_4,), kwargs = {}) # %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_1, 256), kwargs = {}) # %clamp_min_5 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_23, 1.05), kwargs = {}) # %clamp_max_2 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_5, 21), kwargs = {}) # %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max_2, 0.5), kwargs = {}) # %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_25, %convert_element_type_8), kwargs = {}) # %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, 1e-05), kwargs = {}) # %log_4 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_9,), kwargs = {}) # %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_26, %log_4), kwargs = {}) # %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max_2, 0.5), kwargs = {}) # %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_max_2, 1), kwargs = {}) # %div_7 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_24, %sub_8), kwargs = {}) # %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %convert_element_type_8), kwargs = {}) # %mul_28 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_7, %sub_9), kwargs = {}) # %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %view_3), kwargs = {}) # %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_10, 1e-05), kwargs = {}) # %log_5 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_10,), kwargs = {}) # %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_28, %log_5), 
kwargs = {}) # %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_27, %mul_29), kwargs = {}) # %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%add_11,), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_2,), kwargs = {}) # %add_12 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%neg, %neg_1), kwargs = {}) # %mul_30 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_12, 1.0), kwargs = {}) # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%select,), kwargs = {memory_format: torch.contiguous_format}) # %mul : [num_users=8] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg2_1), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%mul, 0.7), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul, 0.3), kwargs = {}) # %bitwise_and : [num_users=1] = call_function[target=torch.ops.aten.bitwise_and.Tensor](args = (%le, %gt_1), kwargs = {}) # %convert_element_type_1 : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and, torch.float32), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_1, %rand), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul, 0.7), kwargs = {}) # %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%gt, torch.float32), kwargs = {}) # %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.default](args = (%convert_element_type,), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%convert_element_type_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div), kwargs = {}) # %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Tensor](args = (%mul_2, %sub), kwargs = {}) # %convert_element_type_3 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%gt_3, torch.float32), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, %convert_element_type_3), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%mul, 0.3), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul, 0.0), kwargs = {}) # %bitwise_and_1 : [num_users=1] = call_function[target=torch.ops.aten.bitwise_and.Tensor](args = (%le_1, %gt_2), kwargs = {}) # %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%bitwise_and_1, torch.float32), kwargs = {}) # %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_2, %arg2_1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %rand_1), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_3), kwargs = {}) # 
%sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div_1), kwargs = {}) # %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Tensor](args = (%mul_3, %sub_1), kwargs = {}) # %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%gt_4, torch.float32), kwargs = {}) # %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %convert_element_type_4), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clone, %add_1), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add_1), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_4, %mul_5), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, %full_default), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_6,), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_4, 0.5), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%add_1,), kwargs = {}) # %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_7, %sum_5), kwargs = {}) # %mul_31 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, 10.0), kwargs = {}) # %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_30, %mul_31), kwargs = {}) # %gt_5 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul, 0.9), kwargs = {}) # %convert_element_type_5 : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%gt_5, torch.float32), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%convert_element_type_5,), kwargs = {}) # %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sum_6, 1), kwargs = {}) # %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%mul, 0.9), kwargs = {}) # %convert_element_type_6 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%le_2, torch.float32), kwargs = {}) # %mul_8 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_6, %arg2_1), kwargs = {}) # %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_8,), kwargs = {}) # %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%clamp_min, %sum_7), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, %clamp_min), kwargs = {}) # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%div_3, 1.05), kwargs = {}) # %clamp_max : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 21), kwargs = {}) # %mul_10 : 
[num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max, 0.5), kwargs = {}) # %clone_1 : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%select_1,), kwargs = {memory_format: torch.contiguous_format}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%clone_1, 1e-05), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_3,), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_10, %log), kwargs = {}) # %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_11, %convert_element_type_5), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max, 0.5), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_max, 1), kwargs = {}) # %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_9, %sub_3), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %clone_1), kwargs = {}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_4, 1e-05), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_4,), kwargs = {}) # %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_4, %log_1), kwargs = {}) # %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_13, %mul_8), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_12, %mul_14), kwargs = {}) # %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%add_5,), kwargs = {}) # %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_8, -1), kwargs = {}) # %div_5 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_15, %add_2), kwargs = {}) # %mul_32 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_5, 1.0), kwargs = {}) # %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_13, %mul_32), kwargs = {}) triton_per_fused__to_copy_add_bitwise_and_clamp_clone_div_gt_le_log_mean_mse_loss_mul_neg_ones_like_reciprocal_rsub_sub_sum_0 = async_compile.triton('triton_per_fused__to_copy_add_bitwise_and_clamp_clone_div_gt_le_log_mean_mse_loss_mul_neg_ones_like_reciprocal_rsub_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: 'i32', 14: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {13: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14), 
equal_to_1=(13,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_add_bitwise_and_clamp_clone_div_gt_le_log_mean_mse_loss_mul_neg_ones_like_reciprocal_rsub_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1', 'in_out_ptr2', 'in_out_ptr3'], 'no_x_dim': True, 'num_load': 10, 'num_reduction': 13, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__to_copy_add_bitwise_and_clamp_clone_div_gt_le_log_mean_mse_loss_mul_neg_ones_like_reciprocal_rsub_sub_sum_0(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr12, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 16 r2 = (rindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (r0), None) tmp19 = tl.load(in_ptr1 + (r0), None) tmp36 = tl.load(in_ptr2 + (r0), None) tmp37 = tl.load(in_ptr3 + (r0), None) tmp62 = tl.load(in_ptr4 + (r0), None) tmp69 = tl.load(in_out_ptr0 + (r0), None) tmp76 = tl.load(in_ptr5 + (r1 + (64*r2)), None, eviction_policy='evict_last') tmp110 = tl.load(in_ptr5 + (16 + r1 + (64*r2)), None, eviction_policy='evict_last') tmp126 = tl.load(in_ptr6 + (r0), None) tmp139 = tl.load(in_ptr7 + (r0), None) tmp1 = 0.5 tmp2 = tmp0 > tmp1 tmp3 = tmp2.to(tl.float32) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 1.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tl.full([1], 1, tl.int32) tmp10 = tmp9 / tmp8 tmp11 = 256.0 tmp12 = tmp10 * tmp11 tmp13 = 1.05 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = 21.0 tmp16 = triton_helpers.minimum(tmp14, tmp15) tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp3 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = tl_math.log(tmp21) tmp23 = tmp18 * tmp22 tmp24 = tmp16 - tmp7 tmp25 = tmp17 / tmp24 tmp26 = tmp7 - tmp3 tmp27 = tmp25 * tmp26 tmp28 = tmp7 - tmp19 tmp29 = tmp28 + tmp20 tmp30 = tl_math.log(tmp29) tmp31 = tmp27 * tmp30 tmp32 = tmp23 + tmp31 tmp33 = tl.broadcast_to(tmp32, [RBLOCK]) tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0)) tmp38 = tmp36 * tmp37 tmp39 = 0.7 tmp40 = tmp38 > tmp39 tmp41 = tmp40.to(tl.float32) tmp42 = tl.broadcast_to(tmp41, [RBLOCK]) tmp44 = triton_helpers.promote_to_tensor(tl.sum(tmp42, 0)) tmp45 = tmp38 <= tmp39 tmp46 = 0.3 tmp47 = tmp38 > tmp46 tmp48 = tmp45 & tmp47 tmp49 = tmp48.to(tl.float32) tmp50 = tl.broadcast_to(tmp49, [RBLOCK]) tmp52 = triton_helpers.promote_to_tensor(tl.sum(tmp50, 0)) tmp53 = tmp38 <= tmp46 tmp54 = 0.0 tmp55 = tmp38 > tmp54 tmp56 = tmp53 & tmp55 tmp57 = tmp56.to(tl.float32) tmp58 = tmp57 * tmp37 tmp59 = tl.broadcast_to(tmp58, [RBLOCK]) tmp61 = triton_helpers.promote_to_tensor(tl.sum(tmp59, 0)) tmp63 = tmp49 * tmp62 tmp64 = tmp44 / tmp52 tmp65 = tmp7 - tmp64 tmp66 = tmp63 > tmp65 tmp67 = tmp66.to(tl.float32) tmp68 = tmp41 + tmp67 tmp70 = tmp58 * tmp69 tmp71 = tmp44 / tmp61 tmp72 = tmp7 - tmp71 
tmp73 = tmp70 > tmp72 tmp74 = tmp73.to(tl.float32) tmp75 = tmp68 + tmp74 tmp77 = tmp76 * tmp75 tmp78 = tmp38 * tmp75 tmp79 = tmp77 - tmp78 tmp80 = tmp79 * tmp79 tmp81 = tl.broadcast_to(tmp80, [RBLOCK]) tmp83 = triton_helpers.promote_to_tensor(tl.sum(tmp81, 0)) tmp84 = 0.9 tmp85 = tmp38 > tmp84 tmp86 = tmp85.to(tl.float32) tmp87 = tl.broadcast_to(tmp86, [RBLOCK]) tmp89 = triton_helpers.promote_to_tensor(tl.sum(tmp87, 0)) tmp90 = tmp38 <= tmp84 tmp91 = tmp90.to(tl.float32) tmp92 = tmp91 * tmp37 tmp93 = tl.broadcast_to(tmp92, [RBLOCK]) tmp95 = triton_helpers.promote_to_tensor(tl.sum(tmp93, 0)) tmp96 = tl.broadcast_to(tmp75, [RBLOCK]) tmp98 = triton_helpers.promote_to_tensor(tl.sum(tmp96, 0)) tmp99 = tmp83 / tmp11 tmp100 = tmp99 * tmp7 tmp101 = tl.broadcast_to(tmp100, [RBLOCK]) tmp103 = triton_helpers.promote_to_tensor(tl.sum(tmp101, 0)) tmp104 = triton_helpers.maximum(tmp89, tmp7) tmp105 = tmp104 + tmp95 tmp106 = tmp105 / tmp104 tmp107 = triton_helpers.maximum(tmp106, tmp13) tmp108 = triton_helpers.minimum(tmp107, tmp15) tmp109 = tmp108 * tmp1 tmp111 = tmp110 + tmp20 tmp112 = tl_math.log(tmp111) tmp113 = tmp109 * tmp112 tmp114 = tmp113 * tmp86 tmp115 = tmp108 - tmp7 tmp116 = tmp109 / tmp115 tmp117 = tmp7 - tmp110 tmp118 = tmp117 + tmp20 tmp119 = tl_math.log(tmp118) tmp120 = tmp116 * tmp119 tmp121 = tmp120 * tmp92 tmp122 = tmp114 + tmp121 tmp123 = tl.broadcast_to(tmp122, [RBLOCK]) tmp125 = triton_helpers.promote_to_tensor(tl.sum(tmp123, 0)) tmp127 = tmp126 > tmp1 tmp128 = tmp127.to(tl.float32) tmp129 = tl.broadcast_to(tmp128, [RBLOCK]) tmp131 = triton_helpers.promote_to_tensor(tl.sum(tmp129, 0)) tmp132 = triton_helpers.maximum(tmp131, tmp7) tmp133 = tmp9 / tmp132 tmp134 = tmp133 * tmp11 tmp135 = triton_helpers.maximum(tmp134, tmp13) tmp136 = triton_helpers.minimum(tmp135, tmp15) tmp137 = tmp136 * tmp1 tmp138 = tmp137 * tmp128 tmp140 = tmp139 + tmp20 tmp141 = tl_math.log(tmp140) tmp142 = tmp138 * tmp141 tmp143 = tmp136 - tmp7 tmp144 = tmp137 / tmp143 tmp145 = tmp7 - tmp128 tmp146 = tmp144 * tmp145 tmp147 = tmp7 - tmp139 tmp148 = tmp147 + tmp20 tmp149 = tl_math.log(tmp148) tmp150 = tmp146 * tmp149 tmp151 = tmp142 + tmp150 tmp152 = tl.broadcast_to(tmp151, [RBLOCK]) tmp154 = triton_helpers.promote_to_tensor(tl.sum(tmp152, 0)) tmp155 = tmp35 / tmp11 tmp156 = -tmp155 tmp157 = tmp154 / tmp11 tmp158 = -tmp157 tmp159 = tmp156 + tmp158 tmp160 = tmp103 * tmp1 tmp161 = tmp160 / tmp98 tmp162 = -1.0 tmp163 = tmp125 * tmp162 tmp164 = tmp163 / tmp105 tmp165 = tmp159 * tmp7 tmp166 = 10.0 tmp167 = tmp161 * tmp166 tmp168 = tmp165 + tmp167 tmp169 = tmp164 * tmp7 tmp170 = tmp168 + tmp169 tl.debug_barrier() tl.store(in_out_ptr2 + (tl.full([1], 0, tl.int32)), tmp159, None) tl.debug_barrier() tl.store(in_out_ptr1 + (tl.full([1], 0, tl.int32)), tmp161, None) tl.debug_barrier() tl.store(in_out_ptr3 + (tl.full([1], 0, tl.int32)), tmp164, None) tl.store(out_ptr12 + (tl.full([1], 0, tl.int32)), tmp170, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg5_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg6_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # 
Topologically Sorted Source Nodes: [u_slmask], Original ATen: [aten.rand_like] buf11 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf12 = buf11 del buf11 # Topologically Sorted Source Nodes: [u_smmask], Original ATen: [aten.rand_like] buf7 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf8 = buf7 del buf7 buf2 = empty_strided_cuda((), (), torch.float32) buf14 = buf12; del buf12 # reuse buf15 = empty_strided_cuda((), (), torch.float32) buf16 = buf15; del buf15 # reuse buf22 = empty_strided_cuda((), (), torch.float32) buf6 = buf2; del buf2 # reuse buf18 = buf16; del buf16 # reuse buf23 = buf22; del buf22 # reuse buf24 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [gt_6, pmask_1, sum_9, num_positive_1, ratio_2, clamp_3, ratio_3, coef_3, mul_18, add_6, log_2, mul_19, mul_16, sub_4, coef_2, sub_5, mul_20, sub_6, add_7, log_3, mul_21, loss_3, mean, loss_4, gt_7, pmask_2, sum_10, num_positive_2, ratio_4, clamp_6, ratio_5, coef_5, mul_24, add_9, log_4, mul_25, mul_22, sub_7, coef_4, sub_8, mul_26, sub_9, add_10, log_5, mul_27, loss_5, mean_1, loss_6, loss_7, mul_28, pred_bm_reg, gt_iou_map, le, gt_1, and_, u_mmask, u_smmask_1, gt, u_hmask, num_h, num_m, r_m, sub, gt_3, u_smmask_2, add, le_1, gt_2, and__1, u_lmask, u_lmask_1, u_slmask_1, num_l, r_l, sub_1, gt_4, u_slmask_2, weights, mul_4, mul_5, loss, ones_like, mul_6, sum_4, mul_7, sum_5, loss_1, mul_29, add_13, gt_5, pmask, sum_6, num_positive, le_2, nmask, nmask_1, sum_7, num_entries, ratio, ratio_1, coef_1, pred_bm_cls, add_3, log, mul_11, loss_pos, mul_9, sub_2, coef_0, sub_3, add_4, log_1, mul_13, loss_neg, add_5, sum_8, mul_15, loss_2, mul_30, loss_8], Original ATen: [aten.gt, aten._to_copy, aten.sum, aten.clamp, aten.reciprocal, aten.mul, aten.add, aten.log, aten.sub, aten.div, aten.rsub, aten.mean, aten.neg, aten.clone, aten.le, aten.bitwise_and, aten.mse_loss, aten.ones_like] stream0 = get_raw_stream(0) triton_per_fused__to_copy_add_bitwise_and_clamp_clone_div_gt_le_log_mean_mse_loss_mul_neg_ones_like_reciprocal_rsub_sub_sum_0.run(buf14, buf18, buf6, buf23, arg4_1, arg3_1, arg1_1, arg2_1, buf8, arg0_1, arg6_1, arg5_1, buf24, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del arg2_1 del arg3_1 del arg4_1 del arg5_1 del arg6_1 del buf14 del buf8 return (buf24, buf6, buf18, buf23, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg4_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg5_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg6_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
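# --- Editor's note: interpreting call()'s outputs (derived from the kernel
# stores above). The kernel writes the temporal term to buf6 (in_out_ptr2),
# the PEM regression term to buf18 (in_out_ptr1), the PEM classification
# term to buf23 (in_out_ptr3) and the weighted total to buf24 (out_ptr12),
# so call() returns (loss, tem_loss, pem_reg_loss, pem_cls_loss) in that
# order, matching the eager BMNLoss.forward below. A minimal invocation
# sketch (assumes a CUDA device):
#
#   args = [torch.rand(4, 4, 4, 4, device='cuda') for _ in range(7)]
#   loss, tem_loss, pem_reg_loss, pem_cls_loss = call(args)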
import torch
import torch.nn.functional as F
import torch.nn as nn


def binary_logistic_regression_loss(reg_score, label, threshold=0.5,
    ratio_range=(1.05, 21), eps=1e-05):
    """Binary Logistic Regression Loss."""
    label = label.view(-1)
    reg_score = reg_score.contiguous().view(-1)
    pmask = (label > threshold).float()
    num_positive = max(torch.sum(pmask), 1)
    num_entries = len(label)
    ratio = num_entries / num_positive
    ratio = min(max(ratio, ratio_range[0]), ratio_range[1])
    coef_0 = 0.5 * ratio / (ratio - 1)
    coef_1 = 0.5 * ratio
    loss = coef_1 * pmask * torch.log(reg_score + eps) + coef_0 * (1.0 - pmask
        ) * torch.log(1.0 - reg_score + eps)
    loss = -torch.mean(loss)
    return loss


class BMNLoss(nn.Module):
    """BMN Loss.

    From paper https://arxiv.org/abs/1907.09702,
    code https://github.com/JJBOY/BMN-Boundary-Matching-Network.
    It calculates the loss for the BMN model. This loss is a weighted sum of

        1) temporal evaluation loss based on confidence score of start and
        end positions.
        2) proposal evaluation regression loss based on confidence scores of
        candidate proposals.
        3) proposal evaluation classification loss based on classification
        results of candidate proposals.
    """

    @staticmethod
    def tem_loss(pred_start, pred_end, gt_start, gt_end):
        """Calculate Temporal Evaluation Module Loss.

        This function calculates the binary_logistic_regression_loss for
        start and end respectively and returns the sum of their losses.

        Args:
            pred_start (torch.Tensor): Predicted start score by BMN model.
            pred_end (torch.Tensor): Predicted end score by BMN model.
            gt_start (torch.Tensor): Groundtruth confidence score for start.
            gt_end (torch.Tensor): Groundtruth confidence score for end.

        Returns:
            torch.Tensor: Returned binary logistic loss.
        """
        loss_start = binary_logistic_regression_loss(pred_start, gt_start)
        loss_end = binary_logistic_regression_loss(pred_end, gt_end)
        loss = loss_start + loss_end
        return loss

    @staticmethod
    def pem_reg_loss(pred_score, gt_iou_map, mask,
        high_temporal_iou_threshold=0.7, low_temporal_iou_threshold=0.3):
        """Calculate Proposal Evaluation Module Regression Loss.

        Args:
            pred_score (torch.Tensor): Predicted temporal_iou score by BMN.
            gt_iou_map (torch.Tensor): Groundtruth temporal_iou score.
            mask (torch.Tensor): Boundary-Matching mask.
            high_temporal_iou_threshold (float): Higher threshold of
                temporal_iou. Default: 0.7.
            low_temporal_iou_threshold (float): Lower threshold of
                temporal_iou. Default: 0.3.

        Returns:
            torch.Tensor: Proposal evaluation regression loss.
        """
        u_hmask = (gt_iou_map > high_temporal_iou_threshold).float()
        u_mmask = ((gt_iou_map <= high_temporal_iou_threshold) & (
            gt_iou_map > low_temporal_iou_threshold)).float()
        u_lmask = ((gt_iou_map <= low_temporal_iou_threshold) & (
            gt_iou_map > 0.0)).float()
        u_lmask = u_lmask * mask
        num_h = torch.sum(u_hmask)
        num_m = torch.sum(u_mmask)
        num_l = torch.sum(u_lmask)
        r_m = num_h / num_m
        u_smmask = torch.rand_like(gt_iou_map)
        u_smmask = u_mmask * u_smmask
        u_smmask = (u_smmask > 1.0 - r_m).float()
        r_l = num_h / num_l
        u_slmask = torch.rand_like(gt_iou_map)
        u_slmask = u_lmask * u_slmask
        u_slmask = (u_slmask > 1.0 - r_l).float()
        weights = u_hmask + u_smmask + u_slmask
        loss = F.mse_loss(pred_score * weights, gt_iou_map * weights)
        loss = 0.5 * torch.sum(loss * torch.ones_like(weights)) / torch.sum(
            weights)
        return loss

    @staticmethod
    def pem_cls_loss(pred_score, gt_iou_map, mask, threshold=0.9,
        ratio_range=(1.05, 21), eps=1e-05):
        """Calculate Proposal Evaluation Module Classification Loss.

        Args:
            pred_score (torch.Tensor): Predicted temporal_iou score by BMN.
            gt_iou_map (torch.Tensor): Groundtruth temporal_iou score.
            mask (torch.Tensor): Boundary-Matching mask.
            threshold (float): Threshold of temporal_iou for positive
                instances. Default: 0.9.
            ratio_range (tuple): Lower bound and upper bound for ratio.
                Default: (1.05, 21)
            eps (float): Epsilon for small value. Default: 1e-5

        Returns:
            torch.Tensor: Proposal evaluation classification loss.
        """
        pmask = (gt_iou_map > threshold).float()
        nmask = (gt_iou_map <= threshold).float()
        nmask = nmask * mask
        num_positive = max(torch.sum(pmask), 1)
        num_entries = num_positive + torch.sum(nmask)
        ratio = num_entries / num_positive
        ratio = torch.clamp(ratio, ratio_range[0], ratio_range[1])
        coef_0 = 0.5 * ratio / (ratio - 1)
        coef_1 = 0.5 * ratio
        loss_pos = coef_1 * torch.log(pred_score + eps) * pmask
        loss_neg = coef_0 * torch.log(1.0 - pred_score + eps) * nmask
        loss = -1 * torch.sum(loss_pos + loss_neg) / num_entries
        return loss

    def forward(self, pred_bm, pred_start, pred_end, gt_iou_map, gt_start,
        gt_end, bm_mask, weight_tem=1.0, weight_pem_reg=10.0,
        weight_pem_cls=1.0):
        """Calculate Boundary Matching Network Loss.

        Args:
            pred_bm (torch.Tensor): Predicted confidence score for boundary
                matching map.
            pred_start (torch.Tensor): Predicted confidence score for start.
            pred_end (torch.Tensor): Predicted confidence score for end.
            gt_iou_map (torch.Tensor): Groundtruth score for boundary
                matching map.
            gt_start (torch.Tensor): Groundtruth temporal_iou score for start.
            gt_end (torch.Tensor): Groundtruth temporal_iou score for end.
            bm_mask (torch.Tensor): Boundary-Matching mask.
            weight_tem (float): Weight for tem loss. Default: 1.0.
            weight_pem_reg (float): Weight for pem regression loss.
                Default: 10.0.
            weight_pem_cls (float): Weight for pem classification loss.
                Default: 1.0.

        Returns:
            tuple([torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]):
                (loss, tem_loss, pem_reg_loss, pem_cls_loss). Loss is the BMN
                loss, tem_loss is the temporal evaluation loss, pem_reg_loss
                is the proposal evaluation regression loss, pem_cls_loss is
                the proposal evaluation classification loss.
        """
        pred_bm_reg = pred_bm[:, 0].contiguous()
        pred_bm_cls = pred_bm[:, 1].contiguous()
        gt_iou_map = gt_iou_map * bm_mask
        pem_reg_loss = self.pem_reg_loss(pred_bm_reg, gt_iou_map, bm_mask)
        pem_cls_loss = self.pem_cls_loss(pred_bm_cls, gt_iou_map, bm_mask)
        tem_loss = self.tem_loss(pred_start, pred_end, gt_start, gt_end)
        loss = (weight_tem * tem_loss + weight_pem_reg * pem_reg_loss +
            weight_pem_cls * pem_cls_loss)
        return loss, tem_loss, pem_reg_loss, pem_cls_loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
        [4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
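# --- Editor's addition: a minimal usage sketch for BMNLoss (not part of the
# original module). Shapes mirror get_inputs() above; note that pem_reg_loss
# draws its sampling masks with torch.rand_like, so the regression term (and
# hence the total loss) is stochastic across calls.
def _bmn_loss_demo():
    (pred_bm, pred_start, pred_end, gt_iou_map, gt_start, gt_end, bm_mask
        ) = (torch.rand(4, 4, 4, 4) for _ in range(7))
    criterion = BMNLoss()
    loss, tem_loss, pem_reg_loss, pem_cls_loss = criterion(pred_bm,
        pred_start, pred_end, gt_iou_map, gt_start, gt_end, bm_mask)
    # With the default weights:
    # loss == 1.0 * tem_loss + 10.0 * pem_reg_loss + 1.0 * pem_cls_loss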
import torch from torch import device import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused__to_copy_add_bitwise_and_clamp_clone_div_gt_le_log_mean_mse_loss_mul_neg_ones_like_reciprocal_rsub_sub_sum_0( in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr12, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex r1 = rindex % 16 r2 = rindex // 16 % 4 tmp0 = tl.load(in_ptr0 + r0, None) tmp19 = tl.load(in_ptr1 + r0, None) tmp36 = tl.load(in_ptr2 + r0, None) tmp37 = tl.load(in_ptr3 + r0, None) tmp62 = tl.load(in_ptr4 + r0, None) tmp69 = tl.load(in_out_ptr0 + r0, None) tmp76 = tl.load(in_ptr5 + (r1 + 64 * r2), None, eviction_policy= 'evict_last') tmp110 = tl.load(in_ptr5 + (16 + r1 + 64 * r2), None, eviction_policy= 'evict_last') tmp126 = tl.load(in_ptr6 + r0, None) tmp139 = tl.load(in_ptr7 + r0, None) tmp1 = 0.5 tmp2 = tmp0 > tmp1 tmp3 = tmp2.to(tl.float32) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 1.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tl.full([1], 1, tl.int32) tmp10 = tmp9 / tmp8 tmp11 = 256.0 tmp12 = tmp10 * tmp11 tmp13 = 1.05 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = 21.0 tmp16 = triton_helpers.minimum(tmp14, tmp15) tmp17 = tmp16 * tmp1 tmp18 = tmp17 * tmp3 tmp20 = 1e-05 tmp21 = tmp19 + tmp20 tmp22 = tl_math.log(tmp21) tmp23 = tmp18 * tmp22 tmp24 = tmp16 - tmp7 tmp25 = tmp17 / tmp24 tmp26 = tmp7 - tmp3 tmp27 = tmp25 * tmp26 tmp28 = tmp7 - tmp19 tmp29 = tmp28 + tmp20 tmp30 = tl_math.log(tmp29) tmp31 = tmp27 * tmp30 tmp32 = tmp23 + tmp31 tmp33 = tl.broadcast_to(tmp32, [RBLOCK]) tmp35 = triton_helpers.promote_to_tensor(tl.sum(tmp33, 0)) tmp38 = tmp36 * tmp37 tmp39 = 0.7 tmp40 = tmp38 > tmp39 tmp41 = tmp40.to(tl.float32) tmp42 = tl.broadcast_to(tmp41, [RBLOCK]) tmp44 = triton_helpers.promote_to_tensor(tl.sum(tmp42, 0)) tmp45 = tmp38 <= tmp39 tmp46 = 0.3 tmp47 = tmp38 > tmp46 tmp48 = tmp45 & tmp47 tmp49 = tmp48.to(tl.float32) tmp50 = tl.broadcast_to(tmp49, [RBLOCK]) tmp52 = triton_helpers.promote_to_tensor(tl.sum(tmp50, 0)) tmp53 = tmp38 <= tmp46 tmp54 = 0.0 tmp55 = tmp38 > tmp54 tmp56 = tmp53 & tmp55 tmp57 = tmp56.to(tl.float32) tmp58 = tmp57 * tmp37 tmp59 = tl.broadcast_to(tmp58, [RBLOCK]) tmp61 = triton_helpers.promote_to_tensor(tl.sum(tmp59, 0)) tmp63 = tmp49 * tmp62 tmp64 = tmp44 / tmp52 tmp65 = tmp7 - tmp64 tmp66 = tmp63 > tmp65 tmp67 = tmp66.to(tl.float32) tmp68 = tmp41 + tmp67 tmp70 = tmp58 * tmp69 tmp71 = tmp44 / tmp61 tmp72 = tmp7 - tmp71 tmp73 = tmp70 > tmp72 tmp74 = tmp73.to(tl.float32) tmp75 = tmp68 + tmp74 tmp77 = tmp76 * tmp75 tmp78 = tmp38 * tmp75 tmp79 = tmp77 - tmp78 tmp80 = tmp79 * tmp79 tmp81 = tl.broadcast_to(tmp80, [RBLOCK]) tmp83 = triton_helpers.promote_to_tensor(tl.sum(tmp81, 0)) tmp84 = 0.9 tmp85 = tmp38 > tmp84 tmp86 = tmp85.to(tl.float32) tmp87 = tl.broadcast_to(tmp86, 
[RBLOCK]) tmp89 = triton_helpers.promote_to_tensor(tl.sum(tmp87, 0)) tmp90 = tmp38 <= tmp84 tmp91 = tmp90.to(tl.float32) tmp92 = tmp91 * tmp37 tmp93 = tl.broadcast_to(tmp92, [RBLOCK]) tmp95 = triton_helpers.promote_to_tensor(tl.sum(tmp93, 0)) tmp96 = tl.broadcast_to(tmp75, [RBLOCK]) tmp98 = triton_helpers.promote_to_tensor(tl.sum(tmp96, 0)) tmp99 = tmp83 / tmp11 tmp100 = tmp99 * tmp7 tmp101 = tl.broadcast_to(tmp100, [RBLOCK]) tmp103 = triton_helpers.promote_to_tensor(tl.sum(tmp101, 0)) tmp104 = triton_helpers.maximum(tmp89, tmp7) tmp105 = tmp104 + tmp95 tmp106 = tmp105 / tmp104 tmp107 = triton_helpers.maximum(tmp106, tmp13) tmp108 = triton_helpers.minimum(tmp107, tmp15) tmp109 = tmp108 * tmp1 tmp111 = tmp110 + tmp20 tmp112 = tl_math.log(tmp111) tmp113 = tmp109 * tmp112 tmp114 = tmp113 * tmp86 tmp115 = tmp108 - tmp7 tmp116 = tmp109 / tmp115 tmp117 = tmp7 - tmp110 tmp118 = tmp117 + tmp20 tmp119 = tl_math.log(tmp118) tmp120 = tmp116 * tmp119 tmp121 = tmp120 * tmp92 tmp122 = tmp114 + tmp121 tmp123 = tl.broadcast_to(tmp122, [RBLOCK]) tmp125 = triton_helpers.promote_to_tensor(tl.sum(tmp123, 0)) tmp127 = tmp126 > tmp1 tmp128 = tmp127.to(tl.float32) tmp129 = tl.broadcast_to(tmp128, [RBLOCK]) tmp131 = triton_helpers.promote_to_tensor(tl.sum(tmp129, 0)) tmp132 = triton_helpers.maximum(tmp131, tmp7) tmp133 = tmp9 / tmp132 tmp134 = tmp133 * tmp11 tmp135 = triton_helpers.maximum(tmp134, tmp13) tmp136 = triton_helpers.minimum(tmp135, tmp15) tmp137 = tmp136 * tmp1 tmp138 = tmp137 * tmp128 tmp140 = tmp139 + tmp20 tmp141 = tl_math.log(tmp140) tmp142 = tmp138 * tmp141 tmp143 = tmp136 - tmp7 tmp144 = tmp137 / tmp143 tmp145 = tmp7 - tmp128 tmp146 = tmp144 * tmp145 tmp147 = tmp7 - tmp139 tmp148 = tmp147 + tmp20 tmp149 = tl_math.log(tmp148) tmp150 = tmp146 * tmp149 tmp151 = tmp142 + tmp150 tmp152 = tl.broadcast_to(tmp151, [RBLOCK]) tmp154 = triton_helpers.promote_to_tensor(tl.sum(tmp152, 0)) tmp155 = tmp35 / tmp11 tmp156 = -tmp155 tmp157 = tmp154 / tmp11 tmp158 = -tmp157 tmp159 = tmp156 + tmp158 tmp160 = tmp103 * tmp1 tmp161 = tmp160 / tmp98 tmp162 = -1.0 tmp163 = tmp125 * tmp162 tmp164 = tmp163 / tmp105 tmp165 = tmp159 * tmp7 tmp166 = 10.0 tmp167 = tmp161 * tmp166 tmp168 = tmp165 + tmp167 tmp169 = tmp164 * tmp7 tmp170 = tmp168 + tmp169 tl.debug_barrier() tl.store(in_out_ptr2 + tl.full([1], 0, tl.int32), tmp159, None) tl.debug_barrier() tl.store(in_out_ptr1 + tl.full([1], 0, tl.int32), tmp161, None) tl.debug_barrier() tl.store(in_out_ptr3 + tl.full([1], 0, tl.int32), tmp164, None) tl.store(out_ptr12 + tl.full([1], 0, tl.int32), tmp170, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg5_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg6_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf11 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch. float32, device=device(type='cuda', index=0), pin_memory=False) buf12 = buf11 del buf11 buf7 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch. 
float32, device=device(type='cuda', index=0), pin_memory=False)
        buf8 = buf7
        del buf7
        buf2 = empty_strided_cuda((), (), torch.float32)
        buf14 = buf12
        del buf12
        buf15 = empty_strided_cuda((), (), torch.float32)
        buf16 = buf15
        del buf15
        buf22 = empty_strided_cuda((), (), torch.float32)
        buf6 = buf2
        del buf2
        buf18 = buf16
        del buf16
        buf23 = buf22
        del buf22
        buf24 = empty_strided_cuda((), (), torch.float32)
        get_raw_stream(0)
        triton_per_fused__to_copy_add_bitwise_and_clamp_clone_div_gt_le_log_mean_mse_loss_mul_neg_ones_like_reciprocal_rsub_sub_sum_0[
            grid(1)](buf14, buf18, buf6, buf23, arg4_1, arg3_1, arg1_1,
            arg2_1, buf8, arg0_1, arg6_1, arg5_1, buf24, 1, 256,
            num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
        del arg3_1
        del arg4_1
        del arg5_1
        del arg6_1
        del buf14
        del buf8
    return buf24, buf6, buf18, buf23


def binary_logistic_regression_loss(reg_score, label, threshold=0.5,
    ratio_range=(1.05, 21), eps=1e-05):
    """Binary Logistic Regression Loss."""
    label = label.view(-1)
    reg_score = reg_score.contiguous().view(-1)
    pmask = (label > threshold).float()
    num_positive = max(torch.sum(pmask), 1)
    num_entries = len(label)
    ratio = num_entries / num_positive
    ratio = min(max(ratio, ratio_range[0]), ratio_range[1])
    coef_0 = 0.5 * ratio / (ratio - 1)
    coef_1 = 0.5 * ratio
    loss = coef_1 * pmask * torch.log(reg_score + eps) + coef_0 * (1.0 - pmask
        ) * torch.log(1.0 - reg_score + eps)
    loss = -torch.mean(loss)
    return loss


class BMNLossNew(nn.Module):
    """BMN Loss.

    From paper https://arxiv.org/abs/1907.09702,
    code https://github.com/JJBOY/BMN-Boundary-Matching-Network.
    It calculates the loss for the BMN model. This loss is a weighted sum of

        1) temporal evaluation loss based on confidence score of start and
        end positions.
        2) proposal evaluation regression loss based on confidence scores of
        candidate proposals.
        3) proposal evaluation classification loss based on classification
        results of candidate proposals.
    """

    @staticmethod
    def tem_loss(pred_start, pred_end, gt_start, gt_end):
        """Calculate Temporal Evaluation Module Loss.

        This function calculates the binary_logistic_regression_loss for
        start and end respectively and returns the sum of their losses.

        Args:
            pred_start (torch.Tensor): Predicted start score by BMN model.
            pred_end (torch.Tensor): Predicted end score by BMN model.
            gt_start (torch.Tensor): Groundtruth confidence score for start.
            gt_end (torch.Tensor): Groundtruth confidence score for end.

        Returns:
            torch.Tensor: Returned binary logistic loss.
        """
        loss_start = binary_logistic_regression_loss(pred_start, gt_start)
        loss_end = binary_logistic_regression_loss(pred_end, gt_end)
        loss = loss_start + loss_end
        return loss

    @staticmethod
    def pem_reg_loss(pred_score, gt_iou_map, mask,
        high_temporal_iou_threshold=0.7, low_temporal_iou_threshold=0.3):
        """Calculate Proposal Evaluation Module Regression Loss.

        Args:
            pred_score (torch.Tensor): Predicted temporal_iou score by BMN.
            gt_iou_map (torch.Tensor): Groundtruth temporal_iou score.
            mask (torch.Tensor): Boundary-Matching mask.
            high_temporal_iou_threshold (float): Higher threshold of
                temporal_iou. Default: 0.7.
            low_temporal_iou_threshold (float): Lower threshold of
                temporal_iou. Default: 0.3.

        Returns:
            torch.Tensor: Proposal evaluation regression loss.
        """
        u_hmask = (gt_iou_map > high_temporal_iou_threshold).float()
        u_mmask = ((gt_iou_map <= high_temporal_iou_threshold) & (
            gt_iou_map > low_temporal_iou_threshold)).float()
        u_lmask = ((gt_iou_map <= low_temporal_iou_threshold) & (
            gt_iou_map > 0.0)).float()
        u_lmask = u_lmask * mask
        num_h = torch.sum(u_hmask)
        num_m = torch.sum(u_mmask)
        num_l = torch.sum(u_lmask)
        r_m = num_h / num_m
        u_smmask = torch.rand_like(gt_iou_map)
        u_smmask = u_mmask * u_smmask
        u_smmask = (u_smmask > 1.0 - r_m).float()
        r_l = num_h / num_l
        u_slmask = torch.rand_like(gt_iou_map)
        u_slmask = u_lmask * u_slmask
        u_slmask = (u_slmask > 1.0 - r_l).float()
        weights = u_hmask + u_smmask + u_slmask
        loss = F.mse_loss(pred_score * weights, gt_iou_map * weights)
        loss = 0.5 * torch.sum(loss * torch.ones_like(weights)) / torch.sum(
            weights)
        return loss

    @staticmethod
    def pem_cls_loss(pred_score, gt_iou_map, mask, threshold=0.9,
        ratio_range=(1.05, 21), eps=1e-05):
        """Calculate Proposal Evaluation Module Classification Loss.

        Args:
            pred_score (torch.Tensor): Predicted temporal_iou score by BMN.
            gt_iou_map (torch.Tensor): Groundtruth temporal_iou score.
            mask (torch.Tensor): Boundary-Matching mask.
            threshold (float): Threshold of temporal_iou for positive
                instances. Default: 0.9.
            ratio_range (tuple): Lower bound and upper bound for ratio.
                Default: (1.05, 21)
            eps (float): Epsilon for small value. Default: 1e-5

        Returns:
            torch.Tensor: Proposal evaluation classification loss.
        """
        pmask = (gt_iou_map > threshold).float()
        nmask = (gt_iou_map <= threshold).float()
        nmask = nmask * mask
        num_positive = max(torch.sum(pmask), 1)
        num_entries = num_positive + torch.sum(nmask)
        ratio = num_entries / num_positive
        ratio = torch.clamp(ratio, ratio_range[0], ratio_range[1])
        coef_0 = 0.5 * ratio / (ratio - 1)
        coef_1 = 0.5 * ratio
        loss_pos = coef_1 * torch.log(pred_score + eps) * pmask
        loss_neg = coef_0 * torch.log(1.0 - pred_score + eps) * nmask
        loss = -1 * torch.sum(loss_pos + loss_neg) / num_entries
        return loss

    def forward(self, input_0, input_1, input_2, input_3, input_4, input_5,
        input_6):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        arg3_1 = input_3
        arg4_1 = input_4
        arg5_1 = input_5
        arg6_1 = input_6
        output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1,
            arg6_1])
        return output[0], output[1], output[2], output[3]
Viditagarwal7479/Video-Swin-Transformer
BMNLoss
false
18,064
[ "Apache-2.0" ]
9
37910ef3141c7b2eef76544f9ec8bdf26ec94c7d
https://github.com/Viditagarwal7479/Video-Swin-Transformer/tree/37910ef3141c7b2eef76544f9ec8bdf26ec94c7d
LanguageModelCriterion
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/xo/cxo4wyawrjir6doqel6gzniveitf2ichrwk7c55f2kj25tyq4eq3.py # Topologically Sorted Source Nodes: [gather, neg, output, sum_1, sum_2, output_1], Original ATen: [aten.gather, aten.neg, aten.mul, aten.sum, aten.div] # Source node to ATen node mapping: # gather => gather # neg => neg # output => mul # output_1 => div # sum_1 => sum_1 # sum_2 => sum_2 # Graph fragment: # %gather : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%view, 1, %view_1), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%gather,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %view_2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view_2,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {}) triton_per_fused_div_gather_mul_neg_sum_0 = async_compile.triton('triton_per_fused_div_gather_mul_neg_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_gather_mul_neg_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 
'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_gather_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp9 = tl.load(in_ptr2 + (r0), None) tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), "index out of bounds: 0 <= tmp4 < 4") tmp6 = tl.load(in_ptr1 + (tmp4 + (4*r0)), None, eviction_policy='evict_last') tmp7 = -tmp6 tmp8 = tmp7.to(tl.float32) tmp10 = tmp8 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp14 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp16 = tl.sum(tmp14, 1)[:, None] tmp17 = tmp13 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp17, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [gather, neg, output, sum_1, sum_2, output_1], Original ATen: [aten.gather, aten.neg, aten.mul, aten.sum, aten.div] stream0 = get_raw_stream(0) triton_per_fused_div_gather_mul_neg_sum_0.run(buf2, arg1_1, arg0_1, arg2_1, 1, 16, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 del arg2_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.int64) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.int64) arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
from torch.autograd import *
import torch.nn.init


def to_contiguous(tensor):
    if tensor.is_contiguous():
        return tensor
    else:
        return tensor.contiguous()


class LanguageModelCriterion(nn.Module):

    def __init__(self):
        super(LanguageModelCriterion, self).__init__()

    def forward(self, input, target, mask):
        target = target[:, :input.size(1)]
        mask = mask[:, :input.size(1)]
        input = to_contiguous(input).view(-1, input.size(2))
        target = to_contiguous(target).view(-1, 1)
        mask = to_contiguous(mask).view(-1, 1)
        output = -input.gather(1, target) * mask
        output = torch.sum(output) / torch.sum(mask)
        return output


def get_inputs():
    return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4],
        dtype=torch.int64), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
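The forward pass above is a masked negative log-likelihood: it gathers the score assigned to each target token, negates it, zeroes out padded positions via the mask, and normalizes by the number of real tokens. A minimal sketch of the same arithmetic on toy values (shapes and tensors here are illustrative, not taken from this record):

import torch

# Hypothetical shapes: batch=2, seq=3, vocab=5.
logprobs = torch.log_softmax(torch.randn(2, 3, 5), dim=-1)
target = torch.randint(0, 5, (2, 3))
mask = torch.tensor([[1.0, 1.0, 0.0], [1.0, 0.0, 0.0]])  # 1 = real token, 0 = padding

# sum(-logprobs[b, t, target[b, t]] * mask[b, t]) / sum(mask)
picked = logprobs.view(-1, logprobs.size(-1)).gather(1, target.view(-1, 1))
loss = torch.sum(-picked * mask.view(-1, 1)) / torch.sum(mask)
print(loss.item())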
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.autograd import *
import torch.nn.init

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_div_gather_mul_neg_sum_0(in_out_ptr0, in_ptr0,
    in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Single-program persistent reduction over all 16 (batch * seq) positions.
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp9 = tl.load(in_ptr2 + r0, None)
    # Wrap possibly-negative indices into range and bounds-check them
    # (Inductor's standard guard around a gather).
    tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tl.device_assert((0 <= tmp4) & (tmp4 < 4),
        'index out of bounds: 0 <= tmp4 < 4')
    tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy=
        'evict_last')
    # Masked NLL: sum(-input[r, target[r]] * mask[r]) / sum(mask).
    tmp7 = -tmp6
    tmp8 = tmp7.to(tl.float32)
    tmp10 = tmp8 * tmp9
    tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
    tmp13 = tl.sum(tmp11, 1)[:, None]
    tmp14 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
    tmp16 = tl.sum(tmp14, 1)[:, None]
    tmp17 = tmp13 / tmp16
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp17, None)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    assert_size_stride(arg2_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_div_gather_mul_neg_sum_0[grid(1)](buf2, arg1_1,
            arg0_1, arg2_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
        del arg2_1
    return buf2,


def to_contiguous(tensor):
    if tensor.is_contiguous():
        return tensor
    else:
        return tensor.contiguous()


class LanguageModelCriterionNew(nn.Module):

    def __init__(self):
        super(LanguageModelCriterionNew, self).__init__()

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]
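One way to exercise the compiled wrapper is to compare it against the eager module on matching inputs. A hedged sketch, assuming a CUDA device, that both classes from this record are importable, and the exact shapes/dtypes from get_inputs() (the kernel was specialized for int64 inputs with these strides, so anything else is out of contract):

import torch

if torch.cuda.is_available():
    inp = torch.ones(4, 4, 4, dtype=torch.int64, device='cuda')
    tgt = torch.ones(4, 4, dtype=torch.int64, device='cuda')
    msk = torch.rand(4, 4, device='cuda')
    eager = LanguageModelCriterion()
    fused = LanguageModelCriterionNew()
    torch.testing.assert_close(eager(inp, tgt, msk), fused(inp, tgt, msk))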
WuJie1010/Fine-Grained-Image-Captioning
LanguageModelCriterion
false
18,065
[ "MIT" ]
9
340bc1868634f3bf0fdd62d439fec32ee1b45407
https://github.com/WuJie1010/Fine-Grained-Image-Captioning/tree/340bc1868634f3bf0fdd62d439fec32ee1b45407
ImgAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/nr/cnr4bp7qqvtxmwuqhn5hgelnallkmsyvddl3djy3tdx4v5w7p6eu.py # Topologically Sorted Source Nodes: [dot, dot_1], Original ATen: [aten.add, aten.tanh] # Source node to ATen node mapping: # dot => add # dot_1 => tanh # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view, %expand), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {}) triton_poi_fused_add_tanh_0 = async_compile.triton('triton_poi_fused_add_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = (xindex // 64) tmp0 = 
tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp5 = libdevice.tanh(tmp4) tl.store(out_ptr0 + (x3), tmp5, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/lj/cljtifj5s2fjrx6b5sqp5x6vu22itarysil42cuuuismtv6z3h75.py # Topologically Sorted Source Nodes: [weight], Original ATen: [aten._softmax] # Source node to ATen node mapping: # weight => amax, div, exp, sub, sum_1 # Graph fragment: # %amax : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%view_2, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_1(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, float("-inf")) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + (16*x0)), tmp11, xmask) tl.store(out_ptr0 + (x0), tmp4, xmask) 
tl.store(out_ptr1 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_5, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [dot, dot_1], Original ATen: [aten.add, aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_add_tanh_0.run(primals_2, buf0, primals_4, buf1, 256, grid=grid(256), stream=stream0) del primals_2 del primals_4 buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [dot_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [weight], Original ATen: [aten._softmax] triton_per_fused__softmax_1.run(buf3, buf4, buf5, buf6, 4, 16, grid=grid(4), stream=stream0) buf7 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf6, (4, 1, 16), (16, 0, 1), 0), reinterpret_tensor(primals_1, (4, 16, 4), (64, 4, 1), 0), out=buf7) del buf6 return (reinterpret_tensor(buf7, (4, 4), (4, 1), 0), primals_5, buf1, buf3, buf4, buf5, reinterpret_tensor(primals_1, (4, 4, 16), (64, 1, 4), 0), primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
import torch.nn.init


class ImgAttention(nn.Module):

    def __init__(self, opt):
        super(ImgAttention, self).__init__()
        self.rnn_size = opt.rnn_size
        self.att_hid_size = opt.att_hid_size
        self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
        self.alpha_net = nn.Linear(self.att_hid_size, 1)

    def forward(self, h, att_feats, p_att_feats):
        att_size = att_feats.numel() // att_feats.size(0) // self.rnn_size
        att = p_att_feats.view(-1, att_size, self.att_hid_size)
        att_h = self.h2att(h)
        att_h = att_h.unsqueeze(1).expand_as(att)
        dot = att + att_h
        dot = torch.tanh(dot)  # originally F.tanh, which is deprecated
        dot = dot.view(-1, self.att_hid_size)
        dot = self.alpha_net(dot)
        dot = dot.view(-1, att_size)
        # dim=1 made explicit; the original F.softmax(dot) relied on the
        # deprecated implicit-dim behavior, which resolves to dim=1 here.
        weight = F.softmax(dot, dim=1)
        att_feats_ = att_feats.view(-1, att_size, self.rnn_size)
        att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1)
        return att_res


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4,
        4, 4])]


def get_init_inputs():
    return [[], {'opt': _mock_config(rnn_size=4, att_hid_size=4)}]
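ImgAttention is additive (Bahdanau-style) attention over a grid of image features: project the hidden state into attention space, add it to the precomputed feature projections, squash with tanh, score every position with alpha_net, softmax over positions, and return the attention-weighted sum of the raw features. A minimal CPU usage sketch (SimpleNamespace is a hypothetical stand-in for the _mock_config helper; shapes follow get_inputs()):

import torch
from types import SimpleNamespace

opt = SimpleNamespace(rnn_size=4, att_hid_size=4)  # stand-in for _mock_config
att = ImgAttention(opt)
h = torch.rand(4, 4)                  # hidden state: (batch, rnn_size)
att_feats = torch.rand(4, 4, 4, 4)    # raw features, viewed as (batch, 16, rnn_size) in forward
p_att_feats = torch.rand(4, 4, 4, 4)  # h2att-space projections of att_feats, precomputed upstream
att_res = att(h, att_feats, p_att_feats)
print(att_res.shape)                  # torch.Size([4, 4]): one attended vector per batch row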
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
from torch.autograd import *
import torch.nn.init

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 4
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tmp5 = libdevice.tanh(tmp4)
    tl.store(out_ptr0 + x3, tmp5, xmask)


@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr0, out_ptr1, out_ptr2,
    xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, float('-inf'))
    tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
    tmp5 = tmp0 - tmp4
    tmp6 = tl_math.exp(tmp5)
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.where(xmask, tmp7, 0)
    tmp10 = tl.sum(tmp9, 1)[:, None]
    tmp11 = tmp6 / tmp10
    tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
    tl.store(out_ptr0 + x0, tmp4, xmask)
    tl.store(out_ptr1 + x0, tmp10, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (1, 4), (4, 1))
    assert_size_stride(primals_7, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_5, reinterpret_tensor(primals_3, (4, 4),
            (1, 4), 0), out=buf0)
        del primals_3
        buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_tanh_0[grid(256)](primals_2, buf0, primals_4,
            buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        del primals_4
        buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0),
            alpha=1, beta=1, out=buf3)
        del primals_7
        buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
        triton_per_fused__softmax_1[grid(4)](buf3, buf4, buf5, buf6, 4, 16,
            XBLOCK=1, num_warps=2, num_stages=1)
        buf7 = reinterpret_tensor(buf0, (4, 1, 4), (4, 4, 1), 0)
        del buf0
        extern_kernels.bmm(reinterpret_tensor(buf6, (4, 1, 16), (16, 0, 1),
            0), reinterpret_tensor(primals_1, (4, 16, 4), (64, 4, 1), 0),
            out=buf7)
        del buf6
    return reinterpret_tensor(buf7, (4, 4), (4, 1), 0
        ), primals_5, buf1, buf3, buf4, buf5, reinterpret_tensor(primals_1,
        (4, 4, 16), (64, 1, 4), 0), primals_6


class ImgAttentionNew(nn.Module):

    def __init__(self, opt):
        super(ImgAttentionNew, self).__init__()
        self.rnn_size = opt.rnn_size
        self.att_hid_size = opt.att_hid_size
        self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
        self.alpha_net = nn.Linear(self.att_hid_size, 1)

    def forward(self, input_0, input_1, input_2):
        primals_3 = self.h2att.weight
        primals_4 = self.h2att.bias
        primals_6 = self.alpha_net.weight
        primals_7 = self.alpha_net.bias
        primals_5 = input_0
        primals_1 = input_1
        primals_2 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
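In the compiled plan above, the two Linear layers go through extern mm/addmm kernels, the add + tanh and the numerically stable softmax are each fused into a single Triton kernel, and the final weighted sum is an extern bmm. Because ImgAttentionNew registers the same parameter names as the eager module, the two can be cross-checked directly; a hedged sketch, assuming a CUDA device and that both classes from this record are importable (SimpleNamespace again stands in for _mock_config):

import torch
from types import SimpleNamespace

if torch.cuda.is_available():
    opt = SimpleNamespace(rnn_size=4, att_hid_size=4)
    eager = ImgAttention(opt).cuda()
    fused = ImgAttentionNew(opt).cuda()
    fused.load_state_dict(eager.state_dict())  # same parameter names, so weights transfer
    h = torch.rand(4, 4, device='cuda')
    att_feats = torch.rand(4, 4, 4, 4, device='cuda')
    p_att_feats = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(eager(h, att_feats, p_att_feats),
                               fused(h, att_feats, p_att_feats))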
WuJie1010/Fine-Grained-Image-Captioning
ImgAttention
false
18,066
[ "MIT" ]
9
340bc1868634f3bf0fdd62d439fec32ee1b45407
https://github.com/WuJie1010/Fine-Grained-Image-Captioning/tree/340bc1868634f3bf0fdd62d439fec32ee1b45407
UNET
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/7u/c7u6iaocgtyp5573tzlkqdfaj7b5q4zs5bmbid6xcef5aet6rcwm.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # x => convolution # x_1 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16777216], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16516096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 64516) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/wt/cwt7252yb2a4ws2xapmbx6t6fsriyrfonxihh5xvodkdvdjln4re.py # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # x_2 => convolution_1 # x_3 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16777216], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16257024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 63504) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/xb/cxbh7tyjcsoxf4uuj4sis5j75xx4jxnjtlnmobvroso4vyaozyko.py # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_4 => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = 
{}) triton_poi_fused_max_pool2d_with_indices_2 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4064256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 126 x3 = (xindex // 126) x2 = (xindex // 15876) x4 = xindex % 15876 tmp0 = tl.load(in_ptr0 + ((2*x0) + (504*x3)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (504*x3)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (252 + (2*x0) + (504*x3)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (253 + (2*x0) + (504*x3)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + (15904*x2)), tmp6, xmask) tl.store(out_ptr1 + (x4 + (16000*x2)), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ju/cju5nwjyh33ybhnxlgmhm7dr674wv6s7aji6vet5vmhpzsvc33cm.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_5 => convolution_2 # Graph fragment: # %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8388608], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 7872512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 15376) % 128 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/y4/cy4wa7bjmybpxg2vmpgw7wrtnfsakdmgnfjlylvxs3xql35hongx.py # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_6 => convolution_3 # Graph fragment: # %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_2, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_4 = async_compile.triton('triton_poi_fused_convolution_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8388608], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 7620608 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 14884) % 128 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/d6/cd6awclezob6vzaaryay6navamhigv2i3gpt6ean3xopu7br3fyg.py # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_7 => getitem_2, getitem_3 # Graph fragment: # %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 1905152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 61 x3 = (xindex // 61) x2 = (xindex // 3721) x4 = xindex % 3721 tmp0 = tl.load(in_ptr0 + ((2*x0) + (244*x3)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (244*x3)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (122 + (2*x0) + (244*x3)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (123 + (2*x0) + (244*x3)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = 
tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + (3744*x2)), tmp6, xmask) tl.store(out_ptr1 + (x4 + (3840*x2)), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/u6/cu6lk3tuykhngq3ydx2vz5x26wgynvud7ktaytjjvgm2prtvwgao.py # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_8 => convolution_4 # Graph fragment: # %convolution_4 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_6 = async_compile.triton('triton_poi_fused_convolution_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3564544 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3481) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/hf/chffhiegmnc75hhlotxmw6wx2xazpyycarkv6ytneidgqzaognev.py # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_9 => convolution_5 # Graph fragment: # %convolution_5 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_4, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_7 = async_compile.triton('triton_poi_fused_convolution_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, 
DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3326976 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3249) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ka/ckandy7q6p2tk2il7xrpzuzff73y6hzrbcesb3niidf2tauxhmml.py # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_10 => getitem_4, getitem_5 # Graph fragment: # %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_8 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 802816 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 28 x1 = (xindex // 28) % 28 x2 = (xindex // 784) x3 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (114*x1) + (3249*x2)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (114*x1) + (3249*x2)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (57 + (2*x0) + (114*x1) + (3249*x2)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (58 + (2*x0) + (114*x1) + (3249*x2)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, None) tl.store(out_ptr1 + (x3), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/vb/cvbld6hhrlsf47xnsofkfpaxuatuomyg6desj3xz5ofyeklb52gi.py # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_11 => convolution_6 # Graph fragment: # %convolution_6 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_9 = async_compile.triton('triton_poi_fused_convolution_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1384448 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 676) % 512 tmp0 = tl.load(in_out_ptr0 + 
(x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/xp/cxpvurh74knviz4isoyblhg22xoapnqjzvz2lpx5jkzvev5iqxxg.py # Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_12 => convolution_7 # Graph fragment: # %convolution_7 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_6, %primals_16, %primals_17, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_10 = async_compile.triton('triton_poi_fused_convolution_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1179648 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 576) % 512 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/nv/cnvic4yqot2fg7x6big55fhcwkbcru6qlyx7o4az3got23l3fqw3.py # Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # x_13 => getitem_6, getitem_7 # Graph fragment: # %getitem_6 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {}) # %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_11 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import 
AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_11(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 294912 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 12 x1 = (xindex // 12) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (48*x1)), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (48*x1)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (24 + (2*x0) + (48*x1)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (25 + (2*x0) + (48*x1)), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x2), tmp6, None) tl.store(out_ptr1 + (x2), tmp16, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/gc/cgc5lwh3qmmhu3jwp6wazksvwf7x62onhnagdjkynkb3xacbmtdi.py # Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_14 => convolution_8 # Graph fragment: # %convolution_8 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_18, %primals_19, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_12 = async_compile.triton('triton_poi_fused_convolution_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 409600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 100) % 1024 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/35/c35cxafywf4wgy7bzno52roiyw6aqkcupawwvaskljhar44clyzg.py # Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_15 => convolution_9 # Graph fragment: # %convolution_9 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_8, %primals_20, %primals_21, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_13 = async_compile.triton('triton_poi_fused_convolution_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 262144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 64) % 1024 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 
tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/b5/cb5t43xb3xg5m4eg2jaqc7pds3ajzh7623iywwn6t6rlratnnthg.py # Topologically Sorted Source Nodes: [x_17], Original ATen: [aten.cat] # Source node to ATen node mapping: # x_17 => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_10, %slice_4], 1), kwargs = {}) triton_poi_fused_cat_14 = async_compile.triton('triton_poi_fused_cat_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_14(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = (xindex // 256) % 1024 x3 = (xindex // 262144) x4 = xindex % 256 x0 = xindex % 16 x1 = (xindex // 16) % 16 x5 = xindex tmp0 = x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 512, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x4 + (256*x2) + (131072*x3)), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + (x2), tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 1024, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tl.load(in_ptr2 + (100 + x0 + (24*x1) + (576*((-512) + x2)) + (294912*x3)), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + (x5), tmp14, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/3h/c3hq6l4bayjuvqftaredmc4n6cub4bcfhmnfdpkf7jpr427p6rdz.py # Topologically Sorted Source Nodes: [x_18], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_18 => convolution_11 # Graph fragment: # %convolution_11 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_24, %primals_25, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_15 = async_compile.triton('triton_poi_fused_convolution_15', ''' import triton import triton.language as tl from 
triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_15', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 401408 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 196) % 512 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/mt/cmtxr35lldpppikjwtparheofn2vvk5cmmhrzs3n76t7r6c4ogir.py # Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_19 => convolution_12 # Graph fragment: # %convolution_12 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_11, %primals_16, %primals_17, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_16 = async_compile.triton('triton_poi_fused_convolution_16', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_16', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 
'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 294912 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 144) % 512 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/g2/cg2re5mkuqa6j3jvt6exxa5t47wzw524dzkv3fxsdsjnhl35ljgq.py # Topologically Sorted Source Nodes: [x_21], Original ATen: [aten.cat] # Source node to ATen node mapping: # x_21 => cat_1 # Graph fragment: # %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_13, %slice_8], 1), kwargs = {}) triton_poi_fused_cat_17 = async_compile.triton('triton_poi_fused_cat_17', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_17(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1179648 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = (xindex // 576) % 512 x3 = (xindex // 294912) x4 = xindex % 576 x0 = xindex % 24 x1 = (xindex // 24) % 24 x5 = xindex tmp0 = x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 256, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x4 + (576*x2) + (147456*x3)), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + (x2), tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 512, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tl.load(in_ptr2 + (928 + x0 + (57*x1) + (3249*((-256) + x2)) + (831744*x3)), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + 
(x5), tmp14, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ct/cctkjvxbcaot3733dopeazaxxf6nnzfd737difmz43fsi2ln6d5f.py # Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_22 => convolution_14 # Graph fragment: # %convolution_14 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_1, %primals_28, %primals_29, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_18 = async_compile.triton('triton_poi_fused_convolution_18', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_18', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 495616 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 484) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/vb/cvbgdy3prcylcmbnpr2nthx27npu2sihk3nicwwrjcsnw2m5eage.py # Topologically Sorted Source Nodes: [x_23], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_23 => convolution_15 # Graph fragment: # %convolution_15 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_14, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_19 = async_compile.triton('triton_poi_fused_convolution_19', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', 
index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_19', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_19(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 409600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 400) % 256 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/yb/cybbhkmjk2w2vardydyiietogerjg3kknpfv6avgo56ss2idda7a.py # Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.cat] # Source node to ATen node mapping: # x_25 => cat_2 # Graph fragment: # %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_16, %slice_12], 1), kwargs = {}) triton_poi_fused_cat_20 = async_compile.triton('triton_poi_fused_cat_20', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_20(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1638400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = (xindex // 1600) % 256 x3 = (xindex // 409600) x4 = xindex % 1600 x0 = xindex % 40 x1 = (xindex // 40) 
% 40 x5 = xindex tmp0 = x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x4 + (1600*x2) + (204800*x3)), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + (x2), tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 256, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tl.load(in_ptr2 + (5043 + x0 + (122*x1) + (14884*((-128) + x2)) + (1905152*x3)), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + (x5), tmp14, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/3k/c3kz5cxothlsmuf3dv2uv5b4evf5uj5x6qsxm4ijf27anfxuz55i.py # Topologically Sorted Source Nodes: [x_26], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_26 => convolution_17 # Graph fragment: # %convolution_17 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_2, %primals_32, %primals_33, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_21 = async_compile.triton('triton_poi_fused_convolution_21', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_21', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_21(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 739328 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1444) % 128 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/us/cushwdp2umjxi2zot2cyi5ecsvbbdzqyf2hxdjpecubeag7ow4mn.py # Topologically Sorted Source Nodes: [x_27], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_27 => convolution_18 # Graph fragment: # %convolution_18 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_17, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) 
triton_poi_fused_convolution_22 = async_compile.triton('triton_poi_fused_convolution_22', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_22', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_22(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 663552 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 1296) % 128 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/hv/chvfu7iwekwd6vpnemnxcnx6nltn3aqsjp6jptv7tphdajvauqnz.py # Topologically Sorted Source Nodes: [x_29], Original ATen: [aten.cat] # Source node to ATen node mapping: # x_29 => cat_3 # Graph fragment: # %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_19, %slice_20], 1), kwargs = {}) triton_poi_fused_cat_23 = async_compile.triton('triton_poi_fused_cat_23', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2654208 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = (xindex // 5184) % 128 x3 = (xindex // 663552) x4 = xindex % 5184 x0 = xindex % 72 x1 = (xindex // 72) % 72 x5 = xindex tmp0 = x2 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x4 + (5184*x2) + (331776*x3)), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + (x2), tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tmp11 = tl.full([1], 128, tl.int64) tmp12 = tmp0 < tmp11 tmp13 = tl.load(in_ptr2 + (22770 + x0 + (252*x1) + (63504*((-64) + x2)) + (4064256*x3)), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + (x5), tmp14, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/b5/cb5szadifdrykcwjpm2u3r5fb55jtoorln54fwxofthziflpywja.py # Topologically Sorted Source Nodes: [x_30], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_30 => convolution_20 # Graph fragment: # %convolution_20 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_3, %primals_36, %primals_37, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_24 = async_compile.triton('triton_poi_fused_convolution_24', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_24', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_24(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1254400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4900) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 
= tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/at/cattywxbzm7ds3knqfq6kazklutwpqmw4c5cn2hmzv5o4a4kw4eo.py # Topologically Sorted Source Nodes: [x_31], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_31 => convolution_21 # Graph fragment: # %convolution_21 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_20, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_25 = async_compile.triton('triton_poi_fused_convolution_25', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_25', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_25(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1183744 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4624) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/5t/c5tjkcoyejbssr6s7c3jpbrfuzwujp6xxvaxo3zz3bhveqgeu4op.py # Topologically Sorted Source Nodes: [x_32], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_32 => convolution_22 # Graph fragment: # %convolution_22 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_21, %primals_38, %primals_39, [1, 1], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_26 = async_compile.triton('triton_poi_fused_convolution_26', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], 
filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_26', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_26(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 73984 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4624) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39 = args args.clear() assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 4, 256, 256), (262144, 65536, 256, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128, ), (1, )) assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_9, (128, ), (1, )) assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (256, ), (1, )) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256, ), (1, )) assert_size_stride(primals_14, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (512, ), (1, )) assert_size_stride(primals_16, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_17, (512, ), (1, )) assert_size_stride(primals_18, (1024, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_19, (1024, ), (1, )) assert_size_stride(primals_20, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_21, (1024, ), (1, )) assert_size_stride(primals_22, (1024, 512, 2, 2), (2048, 4, 2, 1)) assert_size_stride(primals_23, (512, ), (1, )) assert_size_stride(primals_24, (512, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_25, (512, ), (1, )) assert_size_stride(primals_26, (512, 256, 2, 2), (1024, 4, 2, 1)) assert_size_stride(primals_27, (256, ), (1, )) assert_size_stride(primals_28, (256, 512, 3, 
3), (4608, 9, 3, 1)) assert_size_stride(primals_29, (256, ), (1, )) assert_size_stride(primals_30, (256, 128, 2, 2), (512, 4, 2, 1)) assert_size_stride(primals_31, (128, ), (1, )) assert_size_stride(primals_32, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_33, (128, ), (1, )) assert_size_stride(primals_34, (128, 64, 2, 2), (256, 4, 2, 1)) assert_size_stride(primals_35, (64, ), (1, )) assert_size_stride(primals_36, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_37, (64, ), (1, )) assert_size_stride(primals_38, (64, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_39, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 254, 254), (4129024, 64516, 254, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 16516096, grid=grid(16516096), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 252, 252), (4064256, 63504, 252, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 16257024, grid=grid(16257024), stream=stream0) buf4 = empty_strided_cuda((4, 64, 126, 126), (1017856, 15904, 126, 1), torch.float32) buf5 = empty_strided_cuda((4, 64, 126, 126), (1024000, 16000, 126, 1), torch.int8) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_2.run(buf3, buf4, buf5, 4064256, grid=grid(4064256), stream=stream0) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 128, 124, 124), (1968128, 15376, 124, 1)) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] triton_poi_fused_convolution_3.run(buf7, primals_7, 7872512, grid=grid(7872512), stream=stream0) del primals_7 # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 128, 122, 122), (1905152, 14884, 122, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution] triton_poi_fused_convolution_4.run(buf9, primals_9, 7620608, grid=grid(7620608), stream=stream0) buf10 = empty_strided_cuda((4, 128, 61, 61), (479232, 3744, 61, 1), torch.float32) buf11 = empty_strided_cuda((4, 128, 61, 61), (491520, 3840, 61, 1), torch.int8) # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_5.run(buf9, buf10, buf11, 1905152, 
grid=grid(1905152), stream=stream0) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution] buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 256, 59, 59), (891136, 3481, 59, 1)) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution] triton_poi_fused_convolution_6.run(buf13, primals_11, 3564544, grid=grid(3564544), stream=stream0) del primals_11 # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution] buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 256, 57, 57), (831744, 3249, 57, 1)) buf15 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution] triton_poi_fused_convolution_7.run(buf15, primals_13, 3326976, grid=grid(3326976), stream=stream0) buf16 = empty_strided_cuda((4, 256, 28, 28), (200704, 784, 28, 1), torch.float32) buf17 = empty_strided_cuda((4, 256, 28, 28), (200704, 784, 28, 1), torch.int8) # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_8.run(buf15, buf16, buf17, 802816, grid=grid(802816), stream=stream0) # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.convolution] buf18 = extern_kernels.convolution(buf16, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 512, 26, 26), (346112, 676, 26, 1)) buf19 = buf18; del buf18 # reuse # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.convolution] triton_poi_fused_convolution_9.run(buf19, primals_15, 1384448, grid=grid(1384448), stream=stream0) del primals_15 # Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution] buf20 = extern_kernels.convolution(buf19, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 512, 24, 24), (294912, 576, 24, 1)) buf21 = buf20; del buf20 # reuse # Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution] triton_poi_fused_convolution_10.run(buf21, primals_17, 1179648, grid=grid(1179648), stream=stream0) buf22 = empty_strided_cuda((4, 512, 12, 12), (73728, 144, 12, 1), torch.float32) buf23 = empty_strided_cuda((4, 512, 12, 12), (73728, 144, 12, 1), torch.int8) # Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_11.run(buf21, buf22, buf23, 294912, grid=grid(294912), stream=stream0) # Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution] buf24 = extern_kernels.convolution(buf22, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 1024, 10, 10), (102400, 100, 10, 1)) buf25 = buf24; del buf24 # reuse # Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution] triton_poi_fused_convolution_12.run(buf25, primals_19, 409600, grid=grid(409600), stream=stream0) del primals_19 # Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.convolution] buf26 = extern_kernels.convolution(buf25, primals_20, stride=(1, 1), 
padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 1024, 8, 8), (65536, 64, 8, 1)) buf27 = buf26; del buf26 # reuse # Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.convolution] triton_poi_fused_convolution_13.run(buf27, primals_21, 262144, grid=grid(262144), stream=stream0) del primals_21 # Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.convolution] buf28 = extern_kernels.convolution(buf27, primals_22, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 512, 16, 16), (131072, 256, 16, 1)) buf29 = empty_strided_cuda((4, 1024, 16, 16), (262144, 256, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [x_17], Original ATen: [aten.cat] triton_poi_fused_cat_14.run(buf28, primals_23, buf21, buf29, 1048576, grid=grid(1048576), stream=stream0) del buf28 del primals_23 # Topologically Sorted Source Nodes: [x_18], Original ATen: [aten.convolution] buf30 = extern_kernels.convolution(buf29, primals_24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 512, 14, 14), (100352, 196, 14, 1)) buf31 = buf30; del buf30 # reuse # Topologically Sorted Source Nodes: [x_18], Original ATen: [aten.convolution] triton_poi_fused_convolution_15.run(buf31, primals_25, 401408, grid=grid(401408), stream=stream0) del primals_25 # Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.convolution] buf32 = extern_kernels.convolution(buf31, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 512, 12, 12), (73728, 144, 12, 1)) buf33 = buf32; del buf32 # reuse # Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.convolution] triton_poi_fused_convolution_16.run(buf33, primals_17, 294912, grid=grid(294912), stream=stream0) del primals_17 # Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.convolution] buf34 = extern_kernels.convolution(buf33, primals_26, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 256, 24, 24), (147456, 576, 24, 1)) buf35 = empty_strided_cuda((4, 512, 24, 24), (294912, 576, 24, 1), torch.float32) # Topologically Sorted Source Nodes: [x_21], Original ATen: [aten.cat] triton_poi_fused_cat_17.run(buf34, primals_27, buf15, buf35, 1179648, grid=grid(1179648), stream=stream0) del buf34 del primals_27 # Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.convolution] buf36 = extern_kernels.convolution(buf35, primals_28, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 256, 22, 22), (123904, 484, 22, 1)) buf37 = buf36; del buf36 # reuse # Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.convolution] triton_poi_fused_convolution_18.run(buf37, primals_29, 495616, grid=grid(495616), stream=stream0) del primals_29 # Topologically Sorted Source Nodes: [x_23], Original ATen: [aten.convolution] buf38 = extern_kernels.convolution(buf37, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 256, 20, 20), (102400, 400, 20, 1)) buf39 = buf38; del buf38 # reuse # Topologically Sorted Source Nodes: [x_23], 
Original ATen: [aten.convolution] triton_poi_fused_convolution_19.run(buf39, primals_13, 409600, grid=grid(409600), stream=stream0) del primals_13 # Topologically Sorted Source Nodes: [x_24], Original ATen: [aten.convolution] buf40 = extern_kernels.convolution(buf39, primals_30, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 128, 40, 40), (204800, 1600, 40, 1)) buf41 = empty_strided_cuda((4, 256, 40, 40), (409600, 1600, 40, 1), torch.float32) # Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.cat] triton_poi_fused_cat_20.run(buf40, primals_31, buf9, buf41, 1638400, grid=grid(1638400), stream=stream0) del buf40 del primals_31 # Topologically Sorted Source Nodes: [x_26], Original ATen: [aten.convolution] buf42 = extern_kernels.convolution(buf41, primals_32, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 128, 38, 38), (184832, 1444, 38, 1)) buf43 = buf42; del buf42 # reuse # Topologically Sorted Source Nodes: [x_26], Original ATen: [aten.convolution] triton_poi_fused_convolution_21.run(buf43, primals_33, 739328, grid=grid(739328), stream=stream0) del primals_33 # Topologically Sorted Source Nodes: [x_27], Original ATen: [aten.convolution] buf44 = extern_kernels.convolution(buf43, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf44, (4, 128, 36, 36), (165888, 1296, 36, 1)) buf45 = buf44; del buf44 # reuse # Topologically Sorted Source Nodes: [x_27], Original ATen: [aten.convolution] triton_poi_fused_convolution_22.run(buf45, primals_9, 663552, grid=grid(663552), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.convolution] buf46 = extern_kernels.convolution(buf45, primals_34, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf46, (4, 64, 72, 72), (331776, 5184, 72, 1)) buf47 = empty_strided_cuda((4, 128, 72, 72), (663552, 5184, 72, 1), torch.float32) # Topologically Sorted Source Nodes: [x_29], Original ATen: [aten.cat] triton_poi_fused_cat_23.run(buf46, primals_35, buf3, buf47, 2654208, grid=grid(2654208), stream=stream0) del buf46 del primals_35 # Topologically Sorted Source Nodes: [x_30], Original ATen: [aten.convolution] buf48 = extern_kernels.convolution(buf47, primals_36, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf48, (4, 64, 70, 70), (313600, 4900, 70, 1)) buf49 = buf48; del buf48 # reuse # Topologically Sorted Source Nodes: [x_30], Original ATen: [aten.convolution] triton_poi_fused_convolution_24.run(buf49, primals_37, 1254400, grid=grid(1254400), stream=stream0) del primals_37 # Topologically Sorted Source Nodes: [x_31], Original ATen: [aten.convolution] buf50 = extern_kernels.convolution(buf49, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf50, (4, 64, 68, 68), (295936, 4624, 68, 1)) buf51 = buf50; del buf50 # reuse # Topologically Sorted Source Nodes: [x_31], Original ATen: [aten.convolution] triton_poi_fused_convolution_25.run(buf51, primals_5, 1183744, grid=grid(1183744), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [x_32], Original ATen: [aten.convolution] buf52 = 
extern_kernels.convolution(buf51, primals_38, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf52, (4, 4, 68, 68), (18496, 4624, 68, 1)) buf53 = buf52; del buf52 # reuse # Topologically Sorted Source Nodes: [x_32], Original ATen: [aten.convolution] triton_poi_fused_convolution_26.run(buf53, primals_39, 73984, grid=grid(73984), stream=stream0) del primals_39 return (buf53, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf16, buf17, buf19, buf21, buf22, buf23, buf25, buf27, buf29, buf31, buf33, buf35, buf37, buf39, buf41, buf43, buf45, buf47, buf49, buf51, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 256, 256), (262144, 65536, 256, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((1024, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_20 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_21 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_22 = rand_strided((1024, 512, 2, 2), (2048, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_23 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_24 = rand_strided((512, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_25 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_26 = rand_strided((512, 256, 2, 2), (1024, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_27 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_28 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', 
dtype=torch.float32) primals_29 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_30 = rand_strided((256, 128, 2, 2), (512, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_31 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_32 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_33 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_34 = rand_strided((128, 64, 2, 2), (256, 4, 2, 1), device='cuda:0', dtype=torch.float32) primals_35 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_36 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_37 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_38 = rand_strided((64, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_39 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
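# ---------------------------------------------------------------------------
# A minimal sketch (plain Python; the helper name _center_crop_offset is
# illustrative, not part of the generated module). The base offsets hard-coded
# into the skip-connection branch of the fused cat kernels above -- 100 in
# triton_poi_fused_cat_14, 928 in _17, 5043 in _20, 22770 in _23 -- are the
# flattened start of a center crop of the encoder feature map: with
# start = (big - small) // 2, the offset is start * row_stride + start, where
# row_stride equals the uncropped width. This is the inlined form of the `cut`
# helper in the eager model defined below.
def _center_crop_offset(big, small):
    start = (big - small) // 2
    return start * big + start


assert _center_crop_offset(24, 16) == 100  # cat_14: 24x24 skip -> 16x16
assert _center_crop_offset(57, 24) == 928  # cat_17: 57x57 skip -> 24x24
assert _center_crop_offset(122, 40) == 5043  # cat_20: 122x122 skip -> 40x40
assert _center_crop_offset(252, 72) == 22770  # cat_23: 252x252 skip -> 72x72
# ---------------------------------------------------------------------------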
import torch import torch.nn as nn def concat(c1, c2): return torch.cat([c1, c2], dim=1) def conv1x1(in_c, out_c, k, s): return nn.ConvTranspose2d(in_c, out_c, kernel_size=k, stride=s) def conv3x3(in_c, out_c, k, s): return nn.Conv2d(in_c, out_c, kernel_size=k, stride=s) def cut(c1, c2): x1, y1 = c1.size()[2:] x2, y2 = c2.size()[2:] c2 = c2[:, :, int((x2 - x1) / 2):int((x1 + x2) / 2), int((y2 - y1) / 2) :int((y2 + y1) / 2)] return c2 def upconv2x2(in_c, out_c, k, s): return nn.ConvTranspose2d(in_c, out_c, kernel_size=k, stride=s) class UNET(nn.Module): def __init__(self, in_channels, n_classes): super(UNET, self).__init__() self.conv0 = conv3x3(in_channels, 64, 3, 1) self.relu = nn.ReLU(inplace=True) self.conv1 = conv3x3(64, 64, 3, 1) self.maxpool_0 = nn.MaxPool2d(kernel_size=2) self.conv2 = conv3x3(64, 128, 3, 1) self.conv3 = conv3x3(128, 128, 3, 1) self.conv4 = conv3x3(128, 256, 3, 1) self.conv5 = conv3x3(256, 256, 3, 1) self.conv6 = conv3x3(256, 512, 3, 1) self.conv7 = conv3x3(512, 512, 3, 1) self.conv8 = conv3x3(512, 1024, 3, 1) self.conv9 = conv3x3(1024, 1024, 3, 1) self.conv10 = conv3x3(1024, 512, 3, 1) self.conv11 = conv3x3(512, 256, 3, 1) self.conv12 = conv3x3(256, 128, 3, 1) self.conv13 = conv3x3(128, 64, 3, 1) self.conv14 = conv1x1(64, n_classes, 1, 1) self.upconv0 = upconv2x2(1024, 512, 2, 2) self.upconv1 = upconv2x2(512, 256, 2, 2) self.upconv2 = upconv2x2(256, 128, 2, 2) self.upconv3 = upconv2x2(128, 64, 2, 2) def forward(self, x): x = self.conv0(x) x = self.relu(x) x = self.conv1(x) x = self.relu(x) stage1 = x x = self.maxpool_0(x) x = self.conv2(x) x = self.conv3(x) stage2 = x x = self.maxpool_0(x) x = self.conv4(x) x = self.conv5(x) stage3 = x x = self.maxpool_0(x) x = self.conv6(x) x = self.conv7(x) stage4 = x x = self.maxpool_0(x) x = self.conv8(x) x = self.conv9(x) x = self.upconv0(x) stage4 = cut(x, stage4) x = concat(x, stage4) x = self.conv10(x) x = self.conv7(x) x = self.upconv1(x) stage3 = cut(x, stage3) x = concat(x, stage3) x = self.conv11(x) x = self.conv5(x) x = self.upconv2(x) stage2 = cut(x, stage2) x = concat(x, stage2) x = self.conv12(x) x = self.conv3(x) x = self.upconv3(x) stage1 = cut(x, stage1) x = concat(x, stage1) x = self.conv13(x) x = self.conv1(x) x = self.conv14(x) return x def get_inputs(): return [torch.rand([4, 4, 256, 256])] def get_init_inputs(): return [[], {'in_channels': 4, 'n_classes': 4}]
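# Annotation with a minimal shape check (added sketch, not repo code). Two
# things worth noting about the module above: conv1x1 actually constructs an
# nn.ConvTranspose2d, which with kernel_size=1 and stride=1 is functionally a
# plain 1x1 convolution; and cut() center-crops each encoder feature map so
# the skip connection matches the upsampled decoder resolution, since the
# unpadded 3x3 convs shrink every stage. With a 256x256 input, stage1 is
# 252x252, the last upconv yields 72x72, and the final output is 68x68:
if __name__ == "__main__":
    net = UNET(in_channels=4, n_classes=4)
    with torch.no_grad():
        out = net(torch.rand(1, 4, 256, 256))
    print(out.shape)  # torch.Size([1, 4, 68, 68])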
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16516096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 64516 % 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 63504 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4064256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 126 x3 = xindex // 126 x2 = xindex // 15876 x4 = xindex % 15876 tmp0 = tl.load(in_ptr0 + (2 * x0 + 504 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 504 * x3), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (252 + 2 * x0 + 504 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (253 + 2 * x0 + 504 * x3), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + 15904 * x2), tmp6, xmask) tl.store(out_ptr1 + (x4 + 16000 * x2), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 15376 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 14884 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, 
xnumel, XBLOCK: tl.constexpr): xnumel = 1905152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 61 x3 = xindex // 61 x2 = xindex // 3721 x4 = xindex % 3721 tmp0 = tl.load(in_ptr0 + (2 * x0 + 244 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 244 * x3), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (122 + 2 * x0 + 244 * x3), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (123 + 2 * x0 + 244 * x3), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x4 + 3744 * x2), tmp6, xmask) tl.store(out_ptr1 + (x4 + 3840 * x2), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 3564544 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3481 % 256 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 3326976 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3249 % 256 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 28 x1 = xindex // 28 % 28 x2 = xindex // 784 x3 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 114 * x1 + 3249 * x2), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 114 * x1 + 3249 * x2), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (57 + 2 * x0 + 114 * x1 + 3249 * x2), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (58 + 2 * x0 + 114 * x1 + 3249 * x2), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 676 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) 
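# Annotation (added note): the triton_poi_fused_convolution_* kernels here do
# not convolve anything themselves. The convolutions run through
# extern_kernels.convolution with bias=None, and these pointwise kernels add
# the per-channel bias in place afterwards; `x1 = xindex // (H*W) % C`
# recovers the channel index from the flat NCHW offset (e.g. 676 = 26*26 with
# C = 512 in the kernel just above).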
@triton.jit def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 576 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_11(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 48 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 48 * x1), None, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (24 + 2 * x0 + 48 * x1), None, eviction_policy ='evict_last') tmp5 = tl.load(in_ptr0 + (25 + 2 * x0 + 48 * x1), None, eviction_policy ='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x2, tmp6, None) tl.store(out_ptr1 + x2, tmp16, None) @triton.jit def triton_poi_fused_convolution_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 100 % 1024 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 1024 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_cat_14(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 256 % 1024 x3 = xindex // 262144 x4 = xindex % 256 x0 = xindex % 16 x1 = xindex // 16 % 16 x5 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 512, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x4 + 256 * x2 + 131072 * x3), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x2, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 1024, tl.int64) tmp13 = tl.load(in_ptr2 + (100 + x0 + 24 * x1 + 576 * (-512 + x2) + 294912 * x3), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + x5, tmp14, None) @triton.jit def triton_poi_fused_convolution_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 196 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = 
tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 144 % 512 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_cat_17(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 576 % 512 x3 = xindex // 294912 x4 = xindex % 576 x0 = xindex % 24 x1 = xindex // 24 % 24 x5 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 256, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x4 + 576 * x2 + 147456 * x3), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x2, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 512, tl.int64) tmp13 = tl.load(in_ptr2 + (928 + x0 + 57 * x1 + 3249 * (-256 + x2) + 831744 * x3), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + x5, tmp14, None) @triton.jit def triton_poi_fused_convolution_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 484 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_19(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 400 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_cat_20(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 1600 % 256 x3 = xindex // 409600 x4 = xindex % 1600 x0 = xindex % 40 x1 = xindex // 40 % 40 x5 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x4 + 1600 * x2 + 204800 * x3), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x2, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 256, tl.int64) tmp13 = tl.load(in_ptr2 + (5043 + x0 + 122 * x1 + 14884 * (-128 + x2) + 1905152 * x3), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + x5, tmp14, None) @triton.jit def triton_poi_fused_convolution_21(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1444 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, 
tmp2, None) @triton.jit def triton_poi_fused_convolution_22(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1296 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex // 5184 % 128 x3 = xindex // 663552 x4 = xindex % 5184 x0 = xindex % 72 x1 = xindex // 72 % 72 x5 = xindex tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x4 + 5184 * x2 + 331776 * x3), tmp4, other=0.0) tmp6 = tl.load(in_ptr1 + x2, tmp4, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 128, tl.int64) tmp13 = tl.load(in_ptr2 + (22770 + x0 + 252 * x1 + 63504 * (-64 + x2) + 4064256 * x3), tmp10, other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tl.store(out_ptr0 + x5, tmp14, None) @triton.jit def triton_poi_fused_convolution_24(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1254400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4900 % 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_25(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4624 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_26(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 73984 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4624 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39) = args args.clear() assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 256, 256), (262144, 65536, 256, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1)) 
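    # Annotation (added note): these assert_size_stride guards re-check at
    # runtime that every weight and input still has the exact shape/stride
    # layout the graph was compiled against; a mismatch would invalidate the
    # hard-coded offsets baked into the Triton kernels above.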
assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (256,), (1,)) assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_15, (512,), (1,)) assert_size_stride(primals_16, (512, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_17, (512,), (1,)) assert_size_stride(primals_18, (1024, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_19, (1024,), (1,)) assert_size_stride(primals_20, (1024, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_21, (1024,), (1,)) assert_size_stride(primals_22, (1024, 512, 2, 2), (2048, 4, 2, 1)) assert_size_stride(primals_23, (512,), (1,)) assert_size_stride(primals_24, (512, 1024, 3, 3), (9216, 9, 3, 1)) assert_size_stride(primals_25, (512,), (1,)) assert_size_stride(primals_26, (512, 256, 2, 2), (1024, 4, 2, 1)) assert_size_stride(primals_27, (256,), (1,)) assert_size_stride(primals_28, (256, 512, 3, 3), (4608, 9, 3, 1)) assert_size_stride(primals_29, (256,), (1,)) assert_size_stride(primals_30, (256, 128, 2, 2), (512, 4, 2, 1)) assert_size_stride(primals_31, (128,), (1,)) assert_size_stride(primals_32, (128, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_33, (128,), (1,)) assert_size_stride(primals_34, (128, 64, 2, 2), (256, 4, 2, 1)) assert_size_stride(primals_35, (64,), (1,)) assert_size_stride(primals_36, (64, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_37, (64,), (1,)) assert_size_stride(primals_38, (64, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_39, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 254, 254), (4129024, 64516, 254, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(16516096)](buf1, primals_2, 16516096, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 252, 252), (4064256, 63504, 252, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(16257024)](buf3, primals_5, 16257024, XBLOCK=1024, num_warps=4, num_stages=1) buf4 = empty_strided_cuda((4, 64, 126, 126), (1017856, 15904, 126, 1), torch.float32) buf5 = empty_strided_cuda((4, 64, 126, 126), (1024000, 16000, 126, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_2[grid(4064256)](buf3, buf4, buf5, 4064256, XBLOCK=512, num_warps=8, num_stages=1) buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 128, 124, 124), (1968128, 15376, 124, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_3[grid(7872512)](buf7, primals_7, 7872512, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 128, 122, 122), (1905152, 14884, 122, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_4[grid(7620608)](buf9, primals_9, 7620608, XBLOCK=1024, 
num_warps=4, num_stages=1) buf10 = empty_strided_cuda((4, 128, 61, 61), (479232, 3744, 61, 1), torch.float32) buf11 = empty_strided_cuda((4, 128, 61, 61), (491520, 3840, 61, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_5[grid(1905152)](buf9, buf10, buf11, 1905152, XBLOCK=512, num_warps=8, num_stages=1) buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 256, 59, 59), (891136, 3481, 59, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_6[grid(3564544)](buf13, primals_11, 3564544, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 256, 57, 57), (831744, 3249, 57, 1)) buf15 = buf14 del buf14 triton_poi_fused_convolution_7[grid(3326976)](buf15, primals_13, 3326976, XBLOCK=1024, num_warps=4, num_stages=1) buf16 = empty_strided_cuda((4, 256, 28, 28), (200704, 784, 28, 1), torch.float32) buf17 = empty_strided_cuda((4, 256, 28, 28), (200704, 784, 28, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_8[grid(802816)](buf15, buf16, buf17, 802816, XBLOCK=512, num_warps=8, num_stages=1) buf18 = extern_kernels.convolution(buf16, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 512, 26, 26), (346112, 676, 26, 1)) buf19 = buf18 del buf18 triton_poi_fused_convolution_9[grid(1384448)](buf19, primals_15, 1384448, XBLOCK=1024, num_warps=4, num_stages=1) del primals_15 buf20 = extern_kernels.convolution(buf19, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 512, 24, 24), (294912, 576, 24, 1)) buf21 = buf20 del buf20 triton_poi_fused_convolution_10[grid(1179648)](buf21, primals_17, 1179648, XBLOCK=1024, num_warps=4, num_stages=1) buf22 = empty_strided_cuda((4, 512, 12, 12), (73728, 144, 12, 1), torch.float32) buf23 = empty_strided_cuda((4, 512, 12, 12), (73728, 144, 12, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_11[grid(294912)](buf21, buf22, buf23, 294912, XBLOCK=512, num_warps=8, num_stages=1) buf24 = extern_kernels.convolution(buf22, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 1024, 10, 10), (102400, 100, 10, 1)) buf25 = buf24 del buf24 triton_poi_fused_convolution_12[grid(409600)](buf25, primals_19, 409600, XBLOCK=1024, num_warps=4, num_stages=1) del primals_19 buf26 = extern_kernels.convolution(buf25, primals_20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 1024, 8, 8), (65536, 64, 8, 1)) buf27 = buf26 del buf26 triton_poi_fused_convolution_13[grid(262144)](buf27, primals_21, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_21 buf28 = extern_kernels.convolution(buf27, primals_22, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf28, (4, 512, 16, 16), (131072, 256, 16, 1)) buf29 = empty_strided_cuda((4, 1024, 16, 16), (262144, 256, 16, 1), torch.float32) triton_poi_fused_cat_14[grid(1048576)](buf28, primals_23, buf21, buf29, 1048576, 
XBLOCK=1024, num_warps=4, num_stages=1) del buf28 del primals_23 buf30 = extern_kernels.convolution(buf29, primals_24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf30, (4, 512, 14, 14), (100352, 196, 14, 1)) buf31 = buf30 del buf30 triton_poi_fused_convolution_15[grid(401408)](buf31, primals_25, 401408, XBLOCK=1024, num_warps=4, num_stages=1) del primals_25 buf32 = extern_kernels.convolution(buf31, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf32, (4, 512, 12, 12), (73728, 144, 12, 1)) buf33 = buf32 del buf32 triton_poi_fused_convolution_16[grid(294912)](buf33, primals_17, 294912, XBLOCK=1024, num_warps=4, num_stages=1) del primals_17 buf34 = extern_kernels.convolution(buf33, primals_26, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf34, (4, 256, 24, 24), (147456, 576, 24, 1)) buf35 = empty_strided_cuda((4, 512, 24, 24), (294912, 576, 24, 1), torch.float32) triton_poi_fused_cat_17[grid(1179648)](buf34, primals_27, buf15, buf35, 1179648, XBLOCK=1024, num_warps=4, num_stages=1) del buf34 del primals_27 buf36 = extern_kernels.convolution(buf35, primals_28, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 256, 22, 22), (123904, 484, 22, 1)) buf37 = buf36 del buf36 triton_poi_fused_convolution_18[grid(495616)](buf37, primals_29, 495616, XBLOCK=1024, num_warps=4, num_stages=1) del primals_29 buf38 = extern_kernels.convolution(buf37, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf38, (4, 256, 20, 20), (102400, 400, 20, 1)) buf39 = buf38 del buf38 triton_poi_fused_convolution_19[grid(409600)](buf39, primals_13, 409600, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf40 = extern_kernels.convolution(buf39, primals_30, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf40, (4, 128, 40, 40), (204800, 1600, 40, 1)) buf41 = empty_strided_cuda((4, 256, 40, 40), (409600, 1600, 40, 1), torch.float32) triton_poi_fused_cat_20[grid(1638400)](buf40, primals_31, buf9, buf41, 1638400, XBLOCK=1024, num_warps=4, num_stages=1) del buf40 del primals_31 buf42 = extern_kernels.convolution(buf41, primals_32, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 128, 38, 38), (184832, 1444, 38, 1)) buf43 = buf42 del buf42 triton_poi_fused_convolution_21[grid(739328)](buf43, primals_33, 739328, XBLOCK=1024, num_warps=4, num_stages=1) del primals_33 buf44 = extern_kernels.convolution(buf43, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf44, (4, 128, 36, 36), (165888, 1296, 36, 1)) buf45 = buf44 del buf44 triton_poi_fused_convolution_22[grid(663552)](buf45, primals_9, 663552, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf46 = extern_kernels.convolution(buf45, primals_34, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf46, (4, 64, 72, 72), (331776, 5184, 72, 1)) buf47 = empty_strided_cuda((4, 128, 72, 72), (663552, 
5184, 72, 1), torch.float32) triton_poi_fused_cat_23[grid(2654208)](buf46, primals_35, buf3, buf47, 2654208, XBLOCK=1024, num_warps=4, num_stages=1) del buf46 del primals_35 buf48 = extern_kernels.convolution(buf47, primals_36, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf48, (4, 64, 70, 70), (313600, 4900, 70, 1)) buf49 = buf48 del buf48 triton_poi_fused_convolution_24[grid(1254400)](buf49, primals_37, 1254400, XBLOCK=1024, num_warps=4, num_stages=1) del primals_37 buf50 = extern_kernels.convolution(buf49, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf50, (4, 64, 68, 68), (295936, 4624, 68, 1)) buf51 = buf50 del buf50 triton_poi_fused_convolution_25[grid(1183744)](buf51, primals_5, 1183744, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf52 = extern_kernels.convolution(buf51, primals_38, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf52, (4, 4, 68, 68), (18496, 4624, 68, 1)) buf53 = buf52 del buf52 triton_poi_fused_convolution_26[grid(73984)](buf53, primals_39, 73984, XBLOCK=1024, num_warps=4, num_stages=1) del primals_39 return (buf53, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf16, buf17, buf19, buf21, buf22, buf23, buf25, buf27, buf29, buf31, buf33, buf35, buf37, buf39, buf41, buf43, buf45, buf47, buf49, buf51) def concat(c1, c2): return torch.cat([c1, c2], dim=1) def conv1x1(in_c, out_c, k, s): return nn.ConvTranspose2d(in_c, out_c, kernel_size=k, stride=s) def conv3x3(in_c, out_c, k, s): return nn.Conv2d(in_c, out_c, kernel_size=k, stride=s) def cut(c1, c2): x1, y1 = c1.size()[2:] x2, y2 = c2.size()[2:] c2 = c2[:, :, int((x2 - x1) / 2):int((x1 + x2) / 2), int((y2 - y1) / 2) :int((y2 + y1) / 2)] return c2 def upconv2x2(in_c, out_c, k, s): return nn.ConvTranspose2d(in_c, out_c, kernel_size=k, stride=s) class UNETNew(nn.Module): def __init__(self, in_channels, n_classes): super(UNETNew, self).__init__() self.conv0 = conv3x3(in_channels, 64, 3, 1) self.relu = nn.ReLU(inplace=True) self.conv1 = conv3x3(64, 64, 3, 1) self.maxpool_0 = nn.MaxPool2d(kernel_size=2) self.conv2 = conv3x3(64, 128, 3, 1) self.conv3 = conv3x3(128, 128, 3, 1) self.conv4 = conv3x3(128, 256, 3, 1) self.conv5 = conv3x3(256, 256, 3, 1) self.conv6 = conv3x3(256, 512, 3, 1) self.conv7 = conv3x3(512, 512, 3, 1) self.conv8 = conv3x3(512, 1024, 3, 1) self.conv9 = conv3x3(1024, 1024, 3, 1) self.conv10 = conv3x3(1024, 512, 3, 1) self.conv11 = conv3x3(512, 256, 3, 1) self.conv12 = conv3x3(256, 128, 3, 1) self.conv13 = conv3x3(128, 64, 3, 1) self.conv14 = conv1x1(64, n_classes, 1, 1) self.upconv0 = upconv2x2(1024, 512, 2, 2) self.upconv1 = upconv2x2(512, 256, 2, 2) self.upconv2 = upconv2x2(256, 128, 2, 2) self.upconv3 = upconv2x2(128, 64, 2, 2) def forward(self, input_0): primals_1 = self.conv0.weight primals_2 = self.conv0.bias primals_4 = self.conv1.weight primals_5 = self.conv1.bias primals_6 = self.conv2.weight primals_7 = self.conv2.bias primals_8 = self.conv3.weight primals_9 = self.conv3.bias primals_10 = self.conv4.weight primals_11 = self.conv4.bias primals_12 = self.conv5.weight primals_13 = 
self.conv5.bias primals_14 = self.conv6.weight primals_15 = self.conv6.bias primals_16 = self.conv7.weight primals_17 = self.conv7.bias primals_18 = self.conv8.weight primals_19 = self.conv8.bias primals_20 = self.conv9.weight primals_21 = self.conv9.bias primals_24 = self.conv10.weight primals_23 = self.conv10.bias primals_28 = self.conv11.weight primals_27 = self.conv11.bias primals_32 = self.conv12.weight primals_31 = self.conv12.bias primals_36 = self.conv13.weight primals_35 = self.conv13.bias primals_38 = self.conv14.weight primals_39 = self.conv14.bias primals_22 = self.upconv0.weight primals_25 = self.upconv0.bias primals_26 = self.upconv1.weight primals_29 = self.upconv1.bias primals_30 = self.upconv2.weight primals_33 = self.upconv2.bias primals_34 = self.upconv3.weight primals_37 = self.upconv3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39]) return output[0]
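# Illustrative usage sketch (added, not generated output). UNETNew keeps the
# original module structure but routes the parameters through the compiled
# `call` instead of the eager forward; `call` pins cuda:0 and launches Triton
# kernels, so this only runs on a GPU, and the traced batch size is 4:
if torch.cuda.is_available():
    model = UNETNew(in_channels=4, n_classes=4).cuda()
    x = torch.rand(4, 4, 256, 256, device='cuda')
    y = model(x)
    print(y.shape)  # torch.Size([4, 4, 68, 68]), matching buf53's asserted size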
TerenceChen95/Retina-Unet-Pytorch
UNET
false
18067
[ "MIT" ]
5
fad5a9a0bcab5d81a0f1bb2537b9a2ead87828ca
https://github.com/TerenceChen95/Retina-Unet-Pytorch/tree/fad5a9a0bcab5d81a0f1bb2537b9a2ead87828ca
MNIST_CNN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/te/ctef7mzwyjkfyg3f43zkxcxrqjh56sog4eojwdtrxx5ecrksbhsb.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask) ''', device_str='cuda') # 
kernel path: runs/run_shard_2/inductor_cache/dw/cdwg3ppwcltmzknvvr6p3li2zju7b6yerqzmhxb6t2qwpavsycku.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16384 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = (yindex // 128) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/bz/cbzdnqqc2snvhfncph6e66toigdghrocuedx2n4lbvm7kuhwxgl5.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256, 4096], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 
'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 256 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (y0 + (64*x2) + (262144*y1)), tmp2, ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/t4/ct45owzz2qkutlpdzxllijcyhlzq77ggenb6rjo3rqetlmpwc6jh.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # x_2 => var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True}) triton_per_fused_native_group_norm_3 = async_compile.triton('triton_per_fused_native_group_norm_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[8192, 128], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 5, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_3(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): 
xnumel = 8192 rnumel = 128 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x0 = xindex % 32 x1 = (xindex // 32) % 64 x2 = (xindex // 2048) x4 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*((r3 + (128*x1)) % 4096)) + (262144*x2) + ((r3 + (128*x1)) // 4096)), None, eviction_policy='evict_last') tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp7 = tl.sum(tmp5, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 128, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.sum(tmp13, 1)[:, None] tl.store(out_ptr0 + (x4), tmp10, None) tl.store(out_ptr1 + (x4), tmp15, None) tl.store(out_ptr2 + (x4), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/pl/cpl5krumdtgbbbedl2ms73gmkrigal33aw3uk55u4n4p2pr7inyo.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # x_2 => var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True}) triton_per_fused_native_group_norm_4 = async_compile.triton('triton_per_fused_native_group_norm_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[128, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 3, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 128 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 32 x1 = (xindex // 32) x3 = 
xindex tmp0 = tl.load(in_ptr0 + (x0 + (32*r2) + (2048*x1)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + (32*r2) + (2048*x1)), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (x0 + (32*r2) + (2048*x1)), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp15 = tmp12[:, None] tl.store(out_ptr0 + (x3), tmp13, xmask) tl.store(out_ptr1 + (x3), tmp14, xmask) tl.store(out_ptr2 + (x3), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/vp/cvpqbk2wbbaxqr3ktb6dkl4vbhcp44mzqxsnhqf6mfobvynopgdg.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # x_2 => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_per_fused_native_group_norm_5 = async_compile.triton('triton_per_fused_native_group_norm_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[32, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 32 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (4*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + (4*x0)), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1 + 
(4*x0)), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp15 = tmp12[:, None] tmp16 = 32768.0 tmp17 = tmp14 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(out_ptr2 + (x0), tmp20, xmask) tl.store(out_ptr0 + (x0), tmp13, xmask) tl.store(out_ptr1 + (x0), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/47/c47pjvotvkeexjsclrvzublgk7azo5unrbofopzsqnrmb2wd7mr4.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # x_2 => add_1, mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {}) triton_poi_fused_native_group_norm_6 = async_compile.triton('triton_poi_fused_native_group_norm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 64 x2 = (xindex // 262144) tmp0 = tl.load(in_ptr0 + (x3), None) tmp3 = tl.load(in_ptr1 + ((8*x2) + (x0 // 8)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + ((8*x2) + (x0 // 8)), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = 32768.0 tmp7 = tmp5 / tmp6 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = 
libdevice.rsqrt(tmp9) tmp11 = tmp4 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/hq/chqqptul7a2nsw7lkc6eqmxsguyb7xrr25rw3dtxqxxj3usba26k.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_3 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%add_1, %primals_6, %primals_7, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_7 = async_compile.triton('triton_poi_fused_convolution_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/4y/c4yv3eq5pxq6i7f3fc5qltw547x5lariwvib7urajqleuhj34ate.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # x_5 => var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [2, 3]), kwargs = {correction: 0, keepdim: True}) triton_per_fused_native_group_norm_8 = async_compile.triton('triton_per_fused_native_group_norm_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4096, 128], reduction_hint=ReductionHint.INNER, 
filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 5, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_8(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4096 rnumel = 128 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x0 = xindex % 16 x1 = (xindex // 16) % 64 x2 = (xindex // 1024) x4 = xindex tmp0 = tl.load(in_ptr0 + ((8*x0) + (128*((r3 + (128*x1)) % 1024)) + (131072*x2) + ((r3 + (128*x1)) // 1024)), None, eviction_policy='evict_last') tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp7 = tl.sum(tmp5, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 128, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.sum(tmp13, 1)[:, None] tl.store(out_ptr0 + (x4), tmp10, None) tl.store(out_ptr1 + (x4), tmp15, None) tl.store(out_ptr2 + (x4), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/jn/cjnrnjonwtpn74shchrp743lqkvsgrv5v4n6p4ljgjat2lafblqz.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # x_5 => var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [2, 3]), kwargs = {correction: 0, keepdim: True}) triton_per_fused_native_group_norm_9 = async_compile.triton('triton_per_fused_native_group_norm_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': 
[AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 3, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 16 x1 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*r2) + (1024*x1)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + (16*r2) + (1024*x1)), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (x0 + (16*r2) + (1024*x1)), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp15 = tmp12[:, None] tl.store(out_ptr0 + (x3), tmp13, xmask) tl.store(out_ptr1 + (x3), tmp14, xmask) tl.store(out_ptr2 + (x3), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/aj/cajkqpujy6uqzlhavlbvt2hmlqghqcscfodznafr2blwhiltrkpk.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # x_5 => add_2, rsqrt_1, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) triton_per_fused_native_group_norm_10 = async_compile.triton('triton_per_fused_native_group_norm_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[32, 2], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 32 rnumel = 2 RBLOCK: tl.constexpr = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (2*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + (2*x0)), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1 + (2*x0)), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp15 = tmp12[:, None] tmp16 = 16384.0 tmp17 = tmp14 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(out_ptr2 + (x0), tmp20, xmask) tl.store(out_ptr0 + (x0), tmp13, xmask) tl.store(out_ptr1 + (x0), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/nu/cnubbwsjh7jtxssojoxcl6ynrdfkahrzw4ohwsi2d4jh7uaxvhxc.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # x_5 => add_3, mul_3 # Graph fragment: # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %unsqueeze_11), kwargs = {}) # %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %unsqueeze_8), kwargs = {}) triton_poi_fused_native_group_norm_11 = async_compile.triton('triton_poi_fused_native_group_norm_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 128 x2 = (xindex // 131072) tmp0 = tl.load(in_ptr0 + (x3), None) tmp3 = tl.load(in_ptr1 + ((8*x2) + (x0 // 16)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + ((8*x2) + (x0 // 16)), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = 16384.0 tmp7 = tmp5 / tmp6 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp4 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/7m/c7mabks53ql64vupzq5iumf6bpoobisrw3su6euhn7wyatfozc6b.py # Topologically Sorted Source Nodes: [x_11, x_12], Original ATen: [aten.native_group_norm, aten.mean] # Source node to ATen node mapping: # x_11 => add_7, mul_7 # x_12 => mean # Graph fragment: # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_7, %unsqueeze_23), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, %unsqueeze_20), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_7, [-1, -2], True), kwargs = {}) triton_red_fused_mean_native_group_norm_12 = async_compile.triton('triton_red_fused_mean_native_group_norm_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[4096, 128], reduction_hint=ReductionHint.OUTER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_mean_native_group_norm_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': 
False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_mean_native_group_norm_12(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 4096 rnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 128 x4 = (xindex // 128) x2 = (xindex // 1024) tmp3 = tl.load(in_ptr1 + ((8*x2) + (x0 // 16)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + ((8*x2) + (x0 // 16)), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') _tmp17 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x5 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r3 = rindex tmp0 = tl.load(in_ptr0 + (x0 + (128*r3) + (4096*((r3 % 32) // 32)) + (16384*x4)), rmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = 16384.0 tmp7 = tmp5 / tmp6 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp4 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = _tmp17 + tmp16 _tmp17 = tl.where(rmask, tmp18, _tmp17) tmp17 = tl.sum(_tmp17, 1)[:, None] tl.store(out_ptr0 + (x5), tmp17, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/hq/chquaxguadjach5wcvd3x4uwdeqsh54s4qklr6msc6wsfb5vv273.py # Topologically Sorted Source Nodes: [x_11, x_12], Original ATen: [aten.native_group_norm, aten.mean] # Source node to ATen node mapping: # x_11 => add_7, mul_7 # x_12 => mean # Graph fragment: # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_7, %unsqueeze_23), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, %unsqueeze_20), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_7, [-1, -2], True), kwargs = {}) triton_per_fused_mean_native_group_norm_13 = async_compile.triton('triton_per_fused_mean_native_group_norm_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[512, 8], reduction_hint=ReductionHint.OUTER_TINY, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_native_group_norm_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': 
True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_native_group_norm_13(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 512 rnumel = 8 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 128 x1 = (xindex // 128) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (128*r2) + (1024*x1)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 1024.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args args.clear() assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (64, ), (1, )) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128, ), (1, )) assert_size_stride(primals_8, (128, ), (1, )) assert_size_stride(primals_9, (128, ), (1, )) assert_size_stride(primals_10, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (128, ), (1, )) assert_size_stride(primals_12, (128, ), (1, )) assert_size_stride(primals_13, (128, ), (1, )) assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (128, ), (1, )) assert_size_stride(primals_16, (128, ), (1, )) assert_size_stride(primals_17, (128, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_6, buf0, 8192, 9, grid=grid(8192, 9), stream=stream0) del primals_6 buf1 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_10, buf1, 16384, 9, grid=grid(16384, 9), stream=stream0) del primals_10 buf2 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_14, buf2, 16384, 9, grid=grid(16384, 9), stream=stream0) del primals_14 # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf4 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf3, primals_2, buf4, 256, 4096, grid=grid(256, 4096), stream=stream0) del primals_2 buf5 = empty_strided_cuda((4, 8, 1, 1, 4, 
64), (2048, 4, 8192, 8192, 1, 32), torch.float32) buf6 = empty_strided_cuda((4, 8, 1, 1, 4, 64), (2048, 4, 8192, 8192, 1, 32), torch.float32) buf7 = empty_strided_cuda((4, 8, 1, 1, 4, 64), (2048, 4, 8192, 8192, 1, 32), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_3.run(buf4, buf5, buf6, buf7, 8192, 128, grid=grid(8192), stream=stream0) buf8 = empty_strided_cuda((4, 8, 1, 1, 4), (32, 4, 128, 128, 1), torch.float32) buf9 = empty_strided_cuda((4, 8, 1, 1, 4), (32, 4, 128, 128, 1), torch.float32) buf10 = empty_strided_cuda((4, 8, 1, 1, 4), (32, 4, 128, 128, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_4.run(buf5, buf6, buf7, buf8, buf9, buf10, 128, 64, grid=grid(128), stream=stream0) del buf5 del buf6 del buf7 buf11 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf12 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf15 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_5.run(buf8, buf9, buf10, buf11, buf12, buf15, 32, 4, grid=grid(32), stream=stream0) del buf10 del buf8 del buf9 buf14 = reinterpret_tensor(buf3, (4, 64, 64, 64), (262144, 1, 4096, 64), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_group_norm] triton_poi_fused_native_group_norm_6.run(buf4, buf11, buf12, primals_4, primals_5, buf14, 1048576, grid=grid(1048576), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] buf16 = extern_kernels.convolution(buf14, buf0, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf17 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] triton_poi_fused_convolution_7.run(buf17, primals_7, 524288, grid=grid(524288), stream=stream0) del primals_7 buf18 = empty_strided_cuda((4, 8, 1, 1, 2, 64), (1024, 2, 4096, 4096, 1, 16), torch.float32) buf19 = empty_strided_cuda((4, 8, 1, 1, 2, 64), (1024, 2, 4096, 4096, 1, 16), torch.float32) buf20 = empty_strided_cuda((4, 8, 1, 1, 2, 64), (1024, 2, 4096, 4096, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_8.run(buf17, buf18, buf19, buf20, 4096, 128, grid=grid(4096), stream=stream0) buf21 = empty_strided_cuda((4, 8, 1, 1, 2), (16, 2, 64, 64, 1), torch.float32) buf22 = empty_strided_cuda((4, 8, 1, 1, 2), (16, 2, 64, 64, 1), torch.float32) buf23 = empty_strided_cuda((4, 8, 1, 1, 2), (16, 2, 64, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_9.run(buf18, buf19, buf20, buf21, buf22, buf23, 64, 64, grid=grid(64), stream=stream0) buf24 = buf12; del buf12 # reuse buf25 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf28 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_10.run(buf21, buf22, buf23, buf24, buf25, buf28, 32, 2, grid=grid(32), stream=stream0) buf27 = empty_strided_cuda((4, 128, 32, 32), (131072, 
1, 4096, 128), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.native_group_norm] triton_poi_fused_native_group_norm_11.run(buf17, buf24, buf25, primals_8, primals_9, buf27, 524288, grid=grid(524288), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution] buf29 = extern_kernels.convolution(buf27, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf29, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf30 = buf29; del buf29 # reuse # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution] triton_poi_fused_convolution_7.run(buf30, primals_11, 524288, grid=grid(524288), stream=stream0) del primals_11 buf31 = buf20; del buf20 # reuse buf32 = buf19; del buf19 # reuse buf33 = buf18; del buf18 # reuse # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_8.run(buf30, buf31, buf32, buf33, 4096, 128, grid=grid(4096), stream=stream0) buf34 = buf23; del buf23 # reuse buf35 = buf22; del buf22 # reuse buf36 = buf21; del buf21 # reuse # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_9.run(buf31, buf32, buf33, buf34, buf35, buf36, 64, 64, grid=grid(64), stream=stream0) buf37 = buf25; del buf25 # reuse buf38 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf41 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_10.run(buf34, buf35, buf36, buf37, buf38, buf41, 32, 2, grid=grid(32), stream=stream0) buf40 = empty_strided_cuda((4, 128, 32, 32), (131072, 1, 4096, 128), torch.float32) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.native_group_norm] triton_poi_fused_native_group_norm_11.run(buf30, buf37, buf38, primals_12, primals_13, buf40, 524288, grid=grid(524288), stream=stream0) del primals_13 # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution] buf42 = extern_kernels.convolution(buf40, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf43 = buf42; del buf42 # reuse # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution] triton_poi_fused_convolution_7.run(buf43, primals_15, 524288, grid=grid(524288), stream=stream0) del primals_15 buf44 = buf33; del buf33 # reuse buf45 = buf32; del buf32 # reuse buf46 = buf31; del buf31 # reuse # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_8.run(buf43, buf44, buf45, buf46, 4096, 128, grid=grid(4096), stream=stream0) buf47 = buf36; del buf36 # reuse buf48 = buf35; del buf35 # reuse buf49 = buf34; del buf34 # reuse # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_9.run(buf44, buf45, buf46, buf47, buf48, buf49, 64, 64, grid=grid(64), stream=stream0) del buf44 del buf45 buf50 = buf38; del buf38 # reuse buf51 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf53 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_10.run(buf47, 
buf48, buf49, buf50, buf51, buf53, 32, 2, grid=grid(32), stream=stream0) del buf47 del buf48 del buf49 buf54 = reinterpret_tensor(buf46, (4, 128, 1, 1, 8), (1024, 1, 4096, 4096, 128), 0); del buf46 # reuse # Topologically Sorted Source Nodes: [x_11, x_12], Original ATen: [aten.native_group_norm, aten.mean] triton_red_fused_mean_native_group_norm_12.run(buf43, buf50, buf51, primals_16, primals_17, buf54, 4096, 128, grid=grid(4096), stream=stream0) del buf51 del primals_17 buf55 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 512, 512), torch.float32) buf56 = buf55; del buf55 # reuse # Topologically Sorted Source Nodes: [x_11, x_12], Original ATen: [aten.native_group_norm, aten.mean] triton_per_fused_mean_native_group_norm_13.run(buf56, buf54, 512, 8, grid=grid(512), stream=stream0) del buf54 return (reinterpret_tensor(buf56, (4, 128), (128, 1), 0), primals_1, primals_3, primals_4, buf0, primals_8, buf1, primals_12, buf2, primals_16, buf4, buf14, reinterpret_tensor(buf11, (4, 8), (8, 1), 0), reinterpret_tensor(buf15, (4, 8), (8, 1), 0), buf17, buf27, reinterpret_tensor(buf24, (4, 8), (8, 1), 0), reinterpret_tensor(buf28, (4, 8), (8, 1), 0), buf30, buf40, reinterpret_tensor(buf37, (4, 8), (8, 1), 0), reinterpret_tensor(buf41, (4, 8), (8, 1), 0), buf43, reinterpret_tensor(buf50, (4, 8), (8, 1), 0), reinterpret_tensor(buf53, (4, 8), (8, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
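A note on the staged group-norm reductions above: kernels _3/_8 emit per-chunk partial (mean, M2, count) triples, and kernels _4/_9 and _5/_10 merge those partials with `triton_helpers.welford` before taking `rsqrt(M2/N + eps)`. The pure-PyTorch sketch below (illustrative only; `welford_combine` is a hypothetical helper, not part of the generated module) mimics that merge with Chan's parallel-variance formula:

# Illustrative sketch of the partial-statistics merge performed by
# triton_helpers.welford in the kernels above. All names are hypothetical.
import torch

def welford_combine(mean_a, m2_a, n_a, mean_b, m2_b, n_b):
    """Merge two partial (mean, M2, count) statistics (Chan's algorithm)."""
    n = n_a + n_b
    delta = mean_b - mean_a
    mean = mean_a + delta * (n_b / n)
    m2 = m2_a + m2_b + delta * delta * (n_a * n_b / n)
    return mean, m2, n

# Statistics of a group computed in two chunks match the direct ones.
x = torch.randn(8, 32768)
a, b = x[:, :16384], x[:, 16384:]
mean, m2, n = welford_combine(a.mean(1), a.var(1, unbiased=False) * 16384.0, 16384.0,
                              b.mean(1), b.var(1, unbiased=False) * 16384.0, 16384.0)
var = m2 / n
rstd = torch.rsqrt(var + 1e-05)  # corresponds to tmp20 = libdevice.rsqrt(tmp19) in kernel _5
assert torch.allclose(mean, x.mean(1), atol=1e-5)
assert torch.allclose(var, x.var(1, unbiased=False), atol=1e-5)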
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data class MNIST_CNN(nn.Module): def __init__(self): super(MNIST_CNN, self).__init__() self.conv1 = nn.Conv2d(1, 64, 3, 1, padding=1) self.conv2 = nn.Conv2d(64, 128, 3, stride=2, padding=1) self.conv3 = nn.Conv2d(128, 128, 3, 1, padding=1) self.conv4 = nn.Conv2d(128, 128, 3, 1, padding=1) self.bn0 = nn.GroupNorm(8, 64) self.bn1 = nn.GroupNorm(8, 128) self.bn2 = nn.GroupNorm(8, 128) self.bn3 = nn.GroupNorm(8, 128) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) def forward(self, x): x = self.conv1(x) x = F.relu(x) x = self.bn0(x) x = self.conv2(x) x = F.relu(x) x = self.bn1(x) x = self.conv3(x) x = F.relu(x) x = self.bn2(x) x = self.conv4(x) x = F.relu(x) x = self.bn3(x) x = self.avgpool(x) x = x.view(len(x), -1) return x def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
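A minimal usage sketch for the eager reference module above (hypothetical; assumes a CUDA device and that MNIST_CNN from the field above is in scope):

# Run the reference model on an input shaped like get_inputs().
import torch

model = MNIST_CNN().cuda().eval()
x = torch.rand(4, 1, 64, 64, device='cuda')
with torch.no_grad():
    feats = model(x)
print(feats.shape)  # torch.Size([4, 128]): pooled 128-channel features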
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (y0 + 64 * x2 + 262144 * y1), tmp2, ymask) @triton.jit def triton_per_fused_native_group_norm_3(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x0 = xindex % 32 x1 = xindex // 32 % 64 x2 = xindex // 2048 x4 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * ((r3 + 128 * x1) % 4096) + 262144 * x2 + (r3 + 128 * x1) // 4096), None, eviction_policy= 'evict_last') tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp7 = tl.sum(tmp5, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 128, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.sum(tmp13, 1)[:, None] tl.store(out_ptr0 + x4, tmp10, None) tl.store(out_ptr1 + x4, tmp15, None) tl.store(out_ptr2 + x4, tmp9, None) @triton.jit def 
triton_per_fused_native_group_norm_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 128 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 32 x1 = xindex // 32 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 32 * r2 + 2048 * x1), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + 32 * r2 + 2048 * x1), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (x0 + 32 * r2 + 2048 * x1), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp15 = tmp12[:, None] tl.store(out_ptr0 + x3, tmp13, xmask) tl.store(out_ptr1 + x3, tmp14, xmask) tl.store(out_ptr2 + x3, tmp15, xmask) @triton.jit def triton_per_fused_native_group_norm_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 32 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 4 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1 + 4 * x0), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp12[:, None] tmp16 = 32768.0 tmp17 = tmp14 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(out_ptr2 + x0, tmp20, xmask) tl.store(out_ptr0 + x0, tmp13, xmask) tl.store(out_ptr1 + x0, tmp14, xmask) @triton.jit def triton_poi_fused_native_group_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 64 x2 = xindex // 262144 tmp0 = tl.load(in_ptr0 + x3, None) tmp3 = tl.load(in_ptr1 + (8 * x2 + x0 // 8), None, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr2 + (8 * x2 + x0 // 8), None, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = 32768.0 tmp7 = tmp5 / tmp6 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp4 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = 
tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) @triton.jit def triton_per_fused_native_group_norm_8(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x0 = xindex % 16 x1 = xindex // 16 % 64 x2 = xindex // 1024 x4 = xindex tmp0 = tl.load(in_ptr0 + (8 * x0 + 128 * ((r3 + 128 * x1) % 1024) + 131072 * x2 + (r3 + 128 * x1) // 1024), None, eviction_policy= 'evict_last') tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp7 = tl.sum(tmp5, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 128, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.sum(tmp13, 1)[:, None] tl.store(out_ptr0 + x4, tmp10, None) tl.store(out_ptr1 + x4, tmp15, None) tl.store(out_ptr2 + x4, tmp9, None) @triton.jit def triton_per_fused_native_group_norm_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 16 x1 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 1024 * x1), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + 16 * r2 + 1024 * x1), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (x0 + 16 * r2 + 1024 * x1), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp15 = tmp12[:, None] tl.store(out_ptr0 + x3, tmp13, xmask) tl.store(out_ptr1 + x3, tmp14, xmask) tl.store(out_ptr2 + x3, tmp15, xmask) @triton.jit def triton_per_fused_native_group_norm_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 32 RBLOCK: tl.constexpr = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 2 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 2 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1 + 2 * x0), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp12[:, None] tmp16 = 16384.0 tmp17 = tmp14 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(out_ptr2 + x0, tmp20, xmask) tl.store(out_ptr0 + x0, tmp13, xmask) tl.store(out_ptr1 + x0, tmp14, xmask) @triton.jit def triton_poi_fused_native_group_norm_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, 
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 128 x2 = xindex // 131072 tmp0 = tl.load(in_ptr0 + x3, None) tmp3 = tl.load(in_ptr1 + (8 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr2 + (8 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = 16384.0 tmp7 = tmp5 / tmp6 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp4 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_red_fused_mean_native_group_norm_12(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 128 x4 = xindex // 128 x2 = xindex // 1024 tmp3 = tl.load(in_ptr1 + (8 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr2 + (8 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') _tmp17 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x5 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r3 = rindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * r3 + 4096 * (r3 % 32 // 32) + 16384 * x4), rmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = 16384.0 tmp7 = tmp5 / tmp6 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp4 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = _tmp17 + tmp16 _tmp17 = tl.where(rmask, tmp18, _tmp17) tmp17 = tl.sum(_tmp17, 1)[:, None] tl.store(out_ptr0 + x5, tmp17, None) @triton.jit def triton_per_fused_mean_native_group_norm_13(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 512 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 128 x1 = xindex // 128 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * r2 + 1024 * x1), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 1024.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x3, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (64,), (1,)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), 
(1,)) assert_size_stride(primals_8, (128,), (1,)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (128,), (1,)) assert_size_stride(primals_13, (128,), (1,)) assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (128,), (1,)) assert_size_stride(primals_16, (128,), (1,)) assert_size_stride(primals_17, (128,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) get_raw_stream(0) triton_poi_fused_0[grid(8192, 9)](primals_6, buf0, 8192, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf1 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_1[grid(16384, 9)](primals_10, buf1, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf2 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_1[grid(16384, 9)](primals_14, buf2, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_14 buf3 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf4 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.float32) triton_poi_fused_convolution_2[grid(256, 4096)](buf3, primals_2, buf4, 256, 4096, XBLOCK=16, YBLOCK=256, num_warps=8, num_stages=1) del primals_2 buf5 = empty_strided_cuda((4, 8, 1, 1, 4, 64), (2048, 4, 8192, 8192, 1, 32), torch.float32) buf6 = empty_strided_cuda((4, 8, 1, 1, 4, 64), (2048, 4, 8192, 8192, 1, 32), torch.float32) buf7 = empty_strided_cuda((4, 8, 1, 1, 4, 64), (2048, 4, 8192, 8192, 1, 32), torch.float32) triton_per_fused_native_group_norm_3[grid(8192)](buf4, buf5, buf6, buf7, 8192, 128, XBLOCK=8, num_warps=8, num_stages=1) buf8 = empty_strided_cuda((4, 8, 1, 1, 4), (32, 4, 128, 128, 1), torch.float32) buf9 = empty_strided_cuda((4, 8, 1, 1, 4), (32, 4, 128, 128, 1), torch.float32) buf10 = empty_strided_cuda((4, 8, 1, 1, 4), (32, 4, 128, 128, 1), torch.float32) triton_per_fused_native_group_norm_4[grid(128)](buf5, buf6, buf7, buf8, buf9, buf10, 128, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf5 del buf6 del buf7 buf11 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf12 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf15 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) triton_per_fused_native_group_norm_5[grid(32)](buf8, buf9, buf10, buf11, buf12, buf15, 32, 4, XBLOCK=32, num_warps=2, num_stages=1) del buf10 del buf8 del buf9 buf14 = reinterpret_tensor(buf3, (4, 64, 64, 64), (262144, 1, 4096, 64), 0) del buf3 triton_poi_fused_native_group_norm_6[grid(1048576)](buf4, buf11, buf12, primals_4, primals_5, buf14, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf16 = extern_kernels.convolution(buf14, buf0, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf17 = buf16 del buf16 triton_poi_fused_convolution_7[grid(524288)](buf17, primals_7, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf18 = empty_strided_cuda((4, 8, 1, 1, 2, 64), (1024, 2, 4096, 4096, 1, 16), torch.float32) buf19 = 
empty_strided_cuda((4, 8, 1, 1, 2, 64), (1024, 2, 4096, 4096, 1, 16), torch.float32) buf20 = empty_strided_cuda((4, 8, 1, 1, 2, 64), (1024, 2, 4096, 4096, 1, 16), torch.float32) triton_per_fused_native_group_norm_8[grid(4096)](buf17, buf18, buf19, buf20, 4096, 128, XBLOCK=8, num_warps=8, num_stages=1) buf21 = empty_strided_cuda((4, 8, 1, 1, 2), (16, 2, 64, 64, 1), torch.float32) buf22 = empty_strided_cuda((4, 8, 1, 1, 2), (16, 2, 64, 64, 1), torch.float32) buf23 = empty_strided_cuda((4, 8, 1, 1, 2), (16, 2, 64, 64, 1), torch.float32) triton_per_fused_native_group_norm_9[grid(64)](buf18, buf19, buf20, buf21, buf22, buf23, 64, 64, XBLOCK=1, num_warps=2, num_stages=1) buf24 = buf12 del buf12 buf25 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf28 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) triton_per_fused_native_group_norm_10[grid(32)](buf21, buf22, buf23, buf24, buf25, buf28, 32, 2, XBLOCK=32, num_warps=2, num_stages=1) buf27 = empty_strided_cuda((4, 128, 32, 32), (131072, 1, 4096, 128), torch.float32) triton_poi_fused_native_group_norm_11[grid(524288)](buf17, buf24, buf25, primals_8, primals_9, buf27, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf29 = extern_kernels.convolution(buf27, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf29, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf30 = buf29 del buf29 triton_poi_fused_convolution_7[grid(524288)](buf30, primals_11, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf31 = buf20 del buf20 buf32 = buf19 del buf19 buf33 = buf18 del buf18 triton_per_fused_native_group_norm_8[grid(4096)](buf30, buf31, buf32, buf33, 4096, 128, XBLOCK=8, num_warps=8, num_stages=1) buf34 = buf23 del buf23 buf35 = buf22 del buf22 buf36 = buf21 del buf21 triton_per_fused_native_group_norm_9[grid(64)](buf31, buf32, buf33, buf34, buf35, buf36, 64, 64, XBLOCK=1, num_warps=2, num_stages=1) buf37 = buf25 del buf25 buf38 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf41 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) triton_per_fused_native_group_norm_10[grid(32)](buf34, buf35, buf36, buf37, buf38, buf41, 32, 2, XBLOCK=32, num_warps=2, num_stages=1) buf40 = empty_strided_cuda((4, 128, 32, 32), (131072, 1, 4096, 128), torch.float32) triton_poi_fused_native_group_norm_11[grid(524288)](buf30, buf37, buf38, primals_12, primals_13, buf40, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf42 = extern_kernels.convolution(buf40, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf42, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf43 = buf42 del buf42 triton_poi_fused_convolution_7[grid(524288)](buf43, primals_15, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_15 buf44 = buf33 del buf33 buf45 = buf32 del buf32 buf46 = buf31 del buf31 triton_per_fused_native_group_norm_8[grid(4096)](buf43, buf44, buf45, buf46, 4096, 128, XBLOCK=8, num_warps=8, num_stages=1) buf47 = buf36 del buf36 buf48 = buf35 del buf35 buf49 = buf34 del buf34 triton_per_fused_native_group_norm_9[grid(64)](buf44, buf45, buf46, buf47, buf48, buf49, 64, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf44 del buf45 buf50 = buf38 del buf38 buf51 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf53 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) 
triton_per_fused_native_group_norm_10[grid(32)](buf47, buf48, buf49, buf50, buf51, buf53, 32, 2, XBLOCK=32, num_warps=2, num_stages=1) del buf47 del buf48 del buf49 buf54 = reinterpret_tensor(buf46, (4, 128, 1, 1, 8), (1024, 1, 4096, 4096, 128), 0) del buf46 triton_red_fused_mean_native_group_norm_12[grid(4096)](buf43, buf50, buf51, primals_16, primals_17, buf54, 4096, 128, XBLOCK=64, RBLOCK=8, num_warps=4, num_stages=1) del buf51 del primals_17 buf55 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 512, 512), torch.float32) buf56 = buf55 del buf55 triton_per_fused_mean_native_group_norm_13[grid(512)](buf56, buf54, 512, 8, XBLOCK=64, num_warps=4, num_stages=1) del buf54 return (reinterpret_tensor(buf56, (4, 128), (128, 1), 0), primals_1, primals_3, primals_4, buf0, primals_8, buf1, primals_12, buf2, primals_16, buf4, buf14, reinterpret_tensor(buf11, (4, 8), (8, 1), 0), reinterpret_tensor(buf15, (4, 8), (8, 1), 0), buf17, buf27, reinterpret_tensor(buf24, (4, 8), (8, 1), 0), reinterpret_tensor( buf28, (4, 8), (8, 1), 0), buf30, buf40, reinterpret_tensor(buf37, (4, 8), (8, 1), 0), reinterpret_tensor(buf41, (4, 8), (8, 1), 0), buf43, reinterpret_tensor(buf50, (4, 8), (8, 1), 0), reinterpret_tensor(buf53, (4, 8), (8, 1), 0)) class MNIST_CNNNew(nn.Module): def __init__(self): super(MNIST_CNNNew, self).__init__() self.conv1 = nn.Conv2d(1, 64, 3, 1, padding=1) self.conv2 = nn.Conv2d(64, 128, 3, stride=2, padding=1) self.conv3 = nn.Conv2d(128, 128, 3, 1, padding=1) self.conv4 = nn.Conv2d(128, 128, 3, 1, padding=1) self.bn0 = nn.GroupNorm(8, 64) self.bn1 = nn.GroupNorm(8, 128) self.bn2 = nn.GroupNorm(8, 128) self.bn3 = nn.GroupNorm(8, 128) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_6 = self.conv2.weight primals_7 = self.conv2.bias primals_10 = self.conv3.weight primals_11 = self.conv3.bias primals_14 = self.conv4.weight primals_15 = self.conv4.bias primals_4 = self.bn0.weight primals_5 = self.bn0.bias primals_8 = self.bn1.weight primals_9 = self.bn1.bias primals_12 = self.bn2.weight primals_13 = self.bn2.bias primals_16 = self.bn3.weight primals_17 = self.bn3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return output[0]
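# The kernel names in the wrapper above list only convolution,
# native_group_norm, and mean, so the traced forward appears to be four
# conv -> GroupNorm stages followed by global average pooling. A minimal
# eager-mode sketch of that computation, assuming a (4, 1, 64, 64) input
# (inferred from the (4, 64, 64, 64) assertion on the first conv output):
import torch


def mnist_cnn_reference(m, x):
    """Eager-mode mirror of the fused graph for an MNIST_CNNNew instance m."""
    x = m.bn0(m.conv1(x))  # (4, 64, 64, 64)
    x = m.bn1(m.conv2(x))  # stride-2 conv -> (4, 128, 32, 32)
    x = m.bn2(m.conv3(x))
    x = m.bn3(m.conv4(x))
    x = m.avgpool(x)  # the *_mean_native_group_norm_12/13 kernels above
    return x.flatten(1)  # (4, 128), matching output[0] of call()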
VinAIResearch/mDSDI
MNIST_CNN
false
18,068
[ "Apache-2.0" ]
9
8ec49085d8389ab490ec633c3ae4bf66be085366
https://github.com/VinAIResearch/mDSDI/tree/8ec49085d8389ab490ec633c3ae4bf66be085366
CNNBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/5x/c5xaakzlsaar5fdpnajcaa675qnpqztrowagp2233w4jaijoxsi4.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%permute_1,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 125000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 250) % 125 x0 = xindex % 250 x3 = (xindex // 250) x4 = xindex tmp0 = tl.full([1], 0, tl.int64) tmp1 = tmp0 >= tmp0 tmp2 = tl.full([1], 1, tl.int64) tmp3 = tmp0 < tmp2 tmp4 = tmp1 & tmp3 tmp5 = (-1) + (2*x1) tmp6 = tmp5 >= tmp0 tmp7 = 
tl.full([1], 250, tl.int64) tmp8 = tmp5 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tmp4 & tmp9 tmp11 = tl.load(in_ptr0 + ((-250) + x0 + (500*x3)), tmp10 & xmask, other=float("-inf")) tmp12 = 2*x1 tmp13 = tmp12 >= tmp0 tmp14 = tmp12 < tmp7 tmp15 = tmp13 & tmp14 tmp16 = tmp4 & tmp15 tmp17 = tl.load(in_ptr0 + (x0 + (500*x3)), tmp16 & xmask, other=float("-inf")) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + (2*x1) tmp20 = tmp19 >= tmp0 tmp21 = tmp19 < tmp7 tmp22 = tmp20 & tmp21 tmp23 = tmp4 & tmp22 tmp24 = tl.load(in_ptr0 + (250 + x0 + (500*x3)), tmp23 & xmask, other=float("-inf")) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = tl.full([1], 0, tl.int32) tmp27 = triton_helpers.maximum(tmp26, tmp25) tl.store(out_ptr0 + (x4), tmp27, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/o6/co66gf5tbylkf6yghzhydv65c7guuk2vmyorqa23dzpeb67acu2m.py # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv1d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute_2, %primals_2, %primals_3, [1], [1], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024, 128], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 1000 xnumel = 125 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 250 y1 = (yindex // 250) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (250*x2) + (31250*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (125*y3)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/or/corf32h5xrx5tzsgklnld7noq3foi7lemsoj3762tcsoym67fntz.py # Topologically Sorted Source Nodes: [relu_1], Original ATen: [aten.relu, 
aten.threshold_backward] # Source node to ATen node mapping: # relu_1 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%permute_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512, 256], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 500 xnumel = 250 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 125 y1 = (yindex // 125) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (125*x2) + (31250*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + (250*y3)), tmp4, xmask & ymask) tl.store(out_ptr1 + (y0 + (125*x2) + (31360*y1)), tmp6, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/xv/cxv27kubnwnohapsrkwazwn2voieogzw2kzw2yfioen7abs4tc5g.py # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.add] # Source node to ATen node mapping: # out_2 => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%permute_5, %permute_1), kwargs = {}) triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, 
ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512, 256], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 500 xnumel = 250 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 125 y1 = (yindex // 125) y3 = yindex tmp0 = tl.load(in_out_ptr0 + (y0 + (125*x2) + (31250*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int64) tmp4 = tmp3 >= tmp3 tmp5 = tl.full([1, 1], 1, tl.int64) tmp6 = tmp3 < tmp5 tmp7 = tmp4 & tmp6 tmp8 = (-1) + (2*y0) tmp9 = tmp8 >= tmp3 tmp10 = tl.full([1, 1], 250, tl.int64) tmp11 = tmp8 < tmp10 tmp12 = tmp9 & tmp11 tmp13 = tmp7 & tmp12 tmp14 = tl.load(in_ptr1 + ((-250) + x2 + (500*y3)), tmp13 & xmask & ymask, eviction_policy='evict_last', other=float("-inf")) tmp15 = 2*y0 tmp16 = tmp15 >= tmp3 tmp17 = tmp15 < tmp10 tmp18 = tmp16 & tmp17 tmp19 = tmp7 & tmp18 tmp20 = tl.load(in_ptr1 + (x2 + (500*y3)), tmp19 & xmask & ymask, eviction_policy='evict_last', other=float("-inf")) tmp21 = triton_helpers.maximum(tmp20, tmp14) tmp22 = 1 + (2*y0) tmp23 = tmp22 >= tmp3 tmp24 = tmp22 < tmp10 tmp25 = tmp23 & tmp24 tmp26 = tmp7 & tmp25 tmp27 = tl.load(in_ptr1 + (250 + x2 + (500*y3)), tmp26 & xmask & ymask, eviction_policy='evict_last', other=float("-inf")) tmp28 = triton_helpers.maximum(tmp27, tmp21) tmp29 = tmp2 + tmp28 tl.debug_barrier() tl.store(in_out_ptr0 + (y0 + (125*x2) + (31250*y1)), tmp29, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 250, 250), (62500, 250, 1)) assert_size_stride(primals_2, (250, 250, 3), (750, 3, 1)) assert_size_stride(primals_3, (250, ), (1, )) assert_size_stride(primals_4, (250, 250, 3), (750, 3, 1)) assert_size_stride(primals_5, (250, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 125, 250), (31250, 250, 1), torch.float32) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(primals_1, buf0, 125000, 
grid=grid(125000), stream=stream0) buf1 = empty_strided_cuda((4, 250, 125), (31250, 125, 1), torch.float32) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf0, buf1, 1000, 125, grid=grid(1000, 125), stream=stream0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 250, 125), (31250, 125, 1)) buf3 = reinterpret_tensor(buf1, (4, 125, 250), (31250, 250, 1), 0); del buf1 # reuse buf7 = empty_strided_cuda((4, 125, 250), (31360, 1, 125), torch.bool) # Topologically Sorted Source Nodes: [relu_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_2.run(buf2, primals_3, buf3, buf7, 500, 250, grid=grid(500, 250), stream=stream0) del primals_3 buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [relu_1, x_3], Original ATen: [aten.relu, aten.permute] triton_poi_fused_convolution_1.run(buf3, buf4, 1000, 125, grid=grid(1000, 125), stream=stream0) del buf3 # Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf5, (4, 250, 125), (31250, 125, 1)) buf6 = reinterpret_tensor(buf5, (4, 125, 250), (31250, 1, 125), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.add] triton_poi_fused_add_3.run(buf6, primals_5, primals_1, 500, 250, grid=grid(500, 250), stream=stream0) del primals_1 del primals_5 return (buf6, primals_2, primals_4, reinterpret_tensor(buf0, (4, 250, 125), (31250, 1, 250), 0), buf4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 250, 250), (62500, 250, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((250, 250, 3), (750, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((250, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((250, 250, 3), (750, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((250, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn class CNNLayer(nn.Module): """Conv1d layer. nn.Conv1d requires input of shape (batch_size, in_channels, length); however, our input has shape (batch_size, length, in_channels), so we transpose the input to (B, C, L_in) before the conv layer and then transpose the conv output back to (B, L_out, C). """ def __init__(self, in_dim, out_dim, win=3, pad=1): super().__init__() self.conv = nn.Conv1d(in_channels=in_dim, out_channels=out_dim, kernel_size=win, padding=pad) def forward(self, x): """ Args: x: shape=(batch_size, max_seq_len, input_dim) """ x = x.permute(0, 2, 1) out = self.conv(x).permute(0, 2, 1) return out class MaxPool1d(nn.Module): def __init__(self, win=2, stride=None, pad=0): super().__init__() self.pooling = nn.MaxPool1d(kernel_size=win, stride=stride, padding=pad ) def forward(self, x): """ Args: x: shape=(batch_size, max_seq_len, dim) """ x = x.permute(0, 2, 1) x = self.pooling(x).permute(0, 2, 1) return x class CNNBlock(nn.Module): """Block of DPCNN. """ def __init__(self): super().__init__() hidden_dim = 250 self.pooling = MaxPool1d(3, 2, 1) self.conv1 = CNNLayer(hidden_dim, hidden_dim, 3, 1) self.conv2 = CNNLayer(hidden_dim, hidden_dim, 3, 1) def forward(self, x): res = self.pooling(x) x = self.conv1(F.relu(res)) x = self.conv2(F.relu(x)) out = x + res return out def get_inputs(): return [torch.rand([4, 250, 250])] def get_init_inputs(): return [[], {}]
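# A quick shape walk-through of CNNBlock under the (4, 250, 250) input from
# get_inputs() above; a sketch using only the definitions in this record:
import torch

block = CNNBlock()
x = torch.rand(4, 250, 250)  # (batch, max_seq_len, hidden_dim)
res = block.pooling(x)  # MaxPool1d(3, stride=2, pad=1) halves the length:
assert res.shape == (4, 125, 250)  # floor((250 + 2 - 3) / 2) + 1 = 125
out = block(x)  # conv1/conv2 keep the length (kernel 3, padding 1)
assert out.shape == res.shape  # so the residual add x + res is shape-exact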
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 125000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 250 % 125 x0 = xindex % 250 x3 = xindex // 250 x4 = xindex tmp0 = tl.full([1], 0, tl.int64) tmp1 = tmp0 >= tmp0 tmp2 = tl.full([1], 1, tl.int64) tmp3 = tmp0 < tmp2 tmp4 = tmp1 & tmp3 tmp5 = -1 + 2 * x1 tmp6 = tmp5 >= tmp0 tmp7 = tl.full([1], 250, tl.int64) tmp8 = tmp5 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tmp4 & tmp9 tmp11 = tl.load(in_ptr0 + (-250 + x0 + 500 * x3), tmp10 & xmask, other= float('-inf')) tmp12 = 2 * x1 tmp13 = tmp12 >= tmp0 tmp14 = tmp12 < tmp7 tmp15 = tmp13 & tmp14 tmp16 = tmp4 & tmp15 tmp17 = tl.load(in_ptr0 + (x0 + 500 * x3), tmp16 & xmask, other=float( '-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + 2 * x1 tmp20 = tmp19 >= tmp0 tmp21 = tmp19 < tmp7 tmp22 = tmp20 & tmp21 tmp23 = tmp4 & tmp22 tmp24 = tl.load(in_ptr0 + (250 + x0 + 500 * x3), tmp23 & xmask, other= float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = tl.full([1], 0, tl.int32) tmp27 = triton_helpers.maximum(tmp26, tmp25) tl.store(out_ptr0 + x4, tmp27, xmask) @triton.jit def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 1000 xnumel = 125 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 250 y1 = yindex // 250 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 250 * x2 + 31250 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 125 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 500 xnumel = 250 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 125 y1 = yindex // 125 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 125 * x2 + 31250 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 250 * y3), tmp4, xmask & ymask) tl.store(out_ptr1 + (y0 + 125 * x2 + 31360 * y1), tmp6, xmask & ymask) @triton.jit def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 500 xnumel = 250 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex 
< xnumel x2 = xindex y0 = yindex % 125 y1 = yindex // 125 y3 = yindex tmp0 = tl.load(in_out_ptr0 + (y0 + 125 * x2 + 31250 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + x2, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int64) tmp4 = tmp3 >= tmp3 tmp5 = tl.full([1, 1], 1, tl.int64) tmp6 = tmp3 < tmp5 tmp7 = tmp4 & tmp6 tmp8 = -1 + 2 * y0 tmp9 = tmp8 >= tmp3 tmp10 = tl.full([1, 1], 250, tl.int64) tmp11 = tmp8 < tmp10 tmp12 = tmp9 & tmp11 tmp13 = tmp7 & tmp12 tmp14 = tl.load(in_ptr1 + (-250 + x2 + 500 * y3), tmp13 & xmask & ymask, eviction_policy='evict_last', other=float('-inf')) tmp15 = 2 * y0 tmp16 = tmp15 >= tmp3 tmp17 = tmp15 < tmp10 tmp18 = tmp16 & tmp17 tmp19 = tmp7 & tmp18 tmp20 = tl.load(in_ptr1 + (x2 + 500 * y3), tmp19 & xmask & ymask, eviction_policy='evict_last', other=float('-inf')) tmp21 = triton_helpers.maximum(tmp20, tmp14) tmp22 = 1 + 2 * y0 tmp23 = tmp22 >= tmp3 tmp24 = tmp22 < tmp10 tmp25 = tmp23 & tmp24 tmp26 = tmp7 & tmp25 tmp27 = tl.load(in_ptr1 + (250 + x2 + 500 * y3), tmp26 & xmask & ymask, eviction_policy='evict_last', other=float('-inf')) tmp28 = triton_helpers.maximum(tmp27, tmp21) tmp29 = tmp2 + tmp28 tl.debug_barrier() tl.store(in_out_ptr0 + (y0 + 125 * x2 + 31250 * y1), tmp29, xmask & ymask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 250, 250), (62500, 250, 1)) assert_size_stride(primals_2, (250, 250, 3), (750, 3, 1)) assert_size_stride(primals_3, (250,), (1,)) assert_size_stride(primals_4, (250, 250, 3), (750, 3, 1)) assert_size_stride(primals_5, (250,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 125, 250), (31250, 250, 1), torch.float32 ) get_raw_stream(0) triton_poi_fused_relu_0[grid(125000)](primals_1, buf0, 125000, XBLOCK=512, num_warps=8, num_stages=1) buf1 = empty_strided_cuda((4, 250, 125), (31250, 125, 1), torch.float32 ) triton_poi_fused_convolution_1[grid(1000, 125)](buf0, buf1, 1000, 125, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 250, 125), (31250, 125, 1)) buf3 = reinterpret_tensor(buf1, (4, 125, 250), (31250, 250, 1), 0) del buf1 buf7 = empty_strided_cuda((4, 125, 250), (31360, 1, 125), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(500, 250)](buf2, primals_3, buf3, buf7, 500, 250, XBLOCK=2, YBLOCK=512, num_warps=4, num_stages=1) del primals_3 buf4 = buf2 del buf2 triton_poi_fused_convolution_1[grid(1000, 125)](buf3, buf4, 1000, 125, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf3 buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf5, (4, 250, 125), (31250, 125, 1)) buf6 = reinterpret_tensor(buf5, (4, 125, 250), (31250, 1, 125), 0) del buf5 triton_poi_fused_add_3[grid(500, 250)](buf6, primals_5, primals_1, 500, 250, XBLOCK=256, YBLOCK=4, num_warps=4, num_stages=1) del primals_1 del primals_5 return buf6, primals_2, primals_4, reinterpret_tensor(buf0, (4, 250, 125), (31250, 1, 250), 0), buf4, buf7 class CNNLayer(nn.Module): """Conv1d layer. 
nn.Conv1d requires input of shape (batch_size, in_channels, length); however, our input has shape (batch_size, length, in_channels), so we transpose the input to (B, C, L_in) before the conv layer and then transpose the conv output back to (B, L_out, C). """ def __init__(self, in_dim, out_dim, win=3, pad=1): super().__init__() self.conv = nn.Conv1d(in_channels=in_dim, out_channels=out_dim, kernel_size=win, padding=pad) def forward(self, x): """ Args: x: shape=(batch_size, max_seq_len, input_dim) """ x = x.permute(0, 2, 1) out = self.conv(x).permute(0, 2, 1) return out class MaxPool1d(nn.Module): def __init__(self, win=2, stride=None, pad=0): super().__init__() self.pooling = nn.MaxPool1d(kernel_size=win, stride=stride, padding=pad ) def forward(self, x): """ Args: x: shape=(batch_size, max_seq_len, dim) """ x = x.permute(0, 2, 1) x = self.pooling(x).permute(0, 2, 1) return x class CNNBlockNew(nn.Module): """Block of DPCNN. """ def __init__(self): super().__init__() hidden_dim = 250 self.pooling = MaxPool1d(3, 2, 1) self.conv1 = CNNLayer(hidden_dim, hidden_dim, 3, 1) self.conv2 = CNNLayer(hidden_dim, hidden_dim, 3, 1) def forward(self, input_0): primals_2 = self.conv1.conv.weight primals_3 = self.conv1.conv.bias primals_4 = self.conv2.conv.weight primals_5 = self.conv2.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
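# A hypothetical parity check between the eager block and the compiled
# wrapper; a sketch that assumes a CUDA device is available, since call()
# pins all buffers to cuda:0:
import torch

ref, fused = CNNBlock().cuda(), CNNBlockNew().cuda()
fused.load_state_dict(ref.state_dict())  # same conv1/conv2 parameters
x = torch.rand(4, 250, 250, device='cuda')
torch.testing.assert_close(ref(x), fused(x), rtol=1e-4, atol=1e-4)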
WiseDoge/Text-Classification-PyTorch
CNNBlock
false
18,069
[ "MIT" ]
6
9371eeed6bd7ecf1d529c8f2a6c997fcde67a559
https://github.com/WiseDoge/Text-Classification-PyTorch/tree/9371eeed6bd7ecf1d529c8f2a6c997fcde67a559
AttnLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/xc/cxcslufnu6arlkllhqsj22g4bei6y35vjp7mzlkgg2t2qgxxhlbc.py # Topologically Sorted Source Nodes: [query], Original ATen: [aten.tanh] # Source node to ATen node mapping: # query => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/oo/coobrm7cytaeij2eyoctjbuz6lnjrauzvpxipvlrrepxba5rhlgq.py # Topologically Sorted Source Nodes: [scores_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # scores_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/wp/cwpr2jp7bpqwkbaxgttcl2ekjdtkf6lezuspoiunutzd3j2qgeyk.py # Topologically Sorted Source Nodes: [scores_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # scores_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler 
import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [query], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_2, 64, grid=grid(64), stream=stream0) del primals_2 buf2 = empty_strided_cuda((1, 16, 1), (16, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [scores], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf1, (1, 16, 4), (64, 4, 1), 0), reinterpret_tensor(primals_4, (1, 4, 1), (4, 1, 1), 0), out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [scores_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 16, grid=grid(16), stream=stream0) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [scores_1], Original ATen: [aten._softmax] 
triton_poi_fused__softmax_2.run(buf3, buf4, 16, grid=grid(16), stream=stream0) buf5 = reinterpret_tensor(buf3, (4, 1, 4), (4, 4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [attn_vec], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf4, (4, 1, 4), (4, 0, 1), 0), primals_3, out=buf5) del buf4 return (reinterpret_tensor(buf5, (4, 4), (4, 1), 0), primals_3, buf1, buf2, reinterpret_tensor(primals_4, (1, 1, 4), (4, 1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn class AttnLayer(nn.Module): """Attention layer. w is the context vector. Formula: $$ v_i = tanh(Wh_i + b)\\ \\alpha_i = v_i^T w\\ \\alpha_i = softmax(\\alpha_i)\\ Vec = \\sum_0^L \\alpha_i h_i $$ """ def __init__(self, hidden_dim, attn_dim): super().__init__() self.weight = nn.Linear(hidden_dim, attn_dim) self.context = nn.Parameter(torch.randn(attn_dim)) def forward(self, x): """ x: shape=(batch_size, max_len, hidden_dim) """ query = self.weight(x).tanh() scores = torch.einsum('bld,d->bl', query, self.context) scores = F.softmax(scores, dim=-1) attn_vec = torch.einsum('bl,blh->bh', scores, x) return attn_vec def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'hidden_dim': 4, 'attn_dim': 4}]
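# The two einsums in AttnLayer.forward written as plain matmuls with
# explicit shapes; a sketch using the (4, 4, 4) input from get_inputs():
import torch

layer = AttnLayer(hidden_dim=4, attn_dim=4)
x = torch.rand(4, 4, 4)  # (B, L, H)
query = layer.weight(x).tanh()  # (B, L, A)
scores = (query @ layer.context).softmax(-1)  # 'bld,d->bl' -> (B, L)
attn_vec = (scores.unsqueeze(1) @ x).squeeze(1)  # 'bl,blh->bh' -> (B, H)
torch.testing.assert_close(attn_vec, layer(x))  # matches the einsum path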
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((1, 16, 1), (16, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (1, 16, 4), (64, 4, 1), 0), reinterpret_tensor(primals_4, (1, 4, 1), (4, 1, 1), 0), out =buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = 
empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 1, 4), (4, 4, 1), 0) del buf3 extern_kernels.bmm(reinterpret_tensor(buf4, (4, 1, 4), (4, 0, 1), 0 ), primals_3, out=buf5) del buf4 return reinterpret_tensor(buf5, (4, 4), (4, 1), 0 ), primals_3, buf1, buf2, reinterpret_tensor(primals_4, (1, 1, 4), (4, 1, 1), 0) class AttnLayerNew(nn.Module): """Attention layer. w is the context vector. Formula: $$ v_i = tanh(Wh_i + b)\\ \\alpha_i = v_i^T w\\ \\alpha_i = softmax(\\alpha_i)\\ Vec = \\sum_0^L \\alpha_i h_i $$ """ def __init__(self, hidden_dim, attn_dim): super().__init__() self.weight = nn.Linear(hidden_dim, attn_dim) self.context = nn.Parameter(torch.randn(attn_dim)) def forward(self, input_0): primals_1 = self.weight.weight primals_2 = self.weight.bias primals_3 = input_0 primals_4 = self.context output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
WiseDoge/Text-Classification-PyTorch
AttnLayer
false
18,070
[ "MIT" ]
6
9371eeed6bd7ecf1d529c8f2a6c997fcde67a559
https://github.com/WiseDoge/Text-Classification-PyTorch/tree/9371eeed6bd7ecf1d529c8f2a6c997fcde67a559
knn_ContrastiveLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/64/c64kiymgfgu77v6capq5yaaikrxf3ysnewkca434ni7giikaz522.py # Topologically Sorted Source Nodes: [add, sub, cost], Original ATen: [aten.add, aten.sub, aten.clamp] # Source node to ATen node mapping: # add => add # cost => clamp_min # sub => sub # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, 0), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %view), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0), kwargs = {}) triton_poi_fused_add_clamp_sub_0 = async_compile.triton('triton_poi_fused_add_clamp_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_clamp_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) 
* XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (5*x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (5*x0), xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = triton_helpers.maximum(tmp4, tmp1) tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [knn_scores], Original ATen: [aten.mm] extern_kernels.mm(arg2_1, reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf0) del arg2_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [scores], Original ATen: [aten.mm] extern_kernels.mm(arg0_1, reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf1) del arg0_1 del arg1_1 buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [add, sub, cost], Original ATen: [aten.add, aten.sub, aten.clamp] stream0 = get_raw_stream(0) triton_poi_fused_add_clamp_sub_0.run(buf0, buf1, buf2, 4, grid=grid(4), stream=stream0) del buf0 del buf1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn from torch.autograd import * import torch.nn.init def cosine_sim(im, s): """Cosine similarity between all the image and sentence pairs """ return im.mm(s.t()) def order_sim(im, s): """Order embeddings similarity measure $max(0, s-im)$ """ YmX = s.unsqueeze(1).expand(s.size(0), im.size(0), s.size(1) ) - im.unsqueeze(0).expand(s.size(0), im.size(0), s.size(1)) score = -YmX.clamp(min=0).pow(2).sum(2).sqrt().t() return score class knn_ContrastiveLoss(nn.Module): """ Compute contrastive loss """ def __init__(self, margin=0, measure=False): super(knn_ContrastiveLoss, self).__init__() self.margin = margin if measure == 'order': self.sim = order_sim else: self.sim = cosine_sim def forward(self, im, knn_im, s): scores = self.sim(im, s) diagonal = scores.diag().view(im.size(0), 1) knn_scores = self.sim(knn_im, s) knn_diagonal = knn_scores.diag().view(knn_im.size(0), 1) cost = (self.margin + knn_diagonal - diagonal).clamp(min=0) return cost def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
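# With the default margin=0 the two similarity matrices are consumed only
# through their diagonals, i.e. the matched-pair scores. A sketch checking
# the loss against row-wise dot products:
import torch

loss_fn = knn_ContrastiveLoss(margin=0)
im, knn_im, s = (torch.rand(4, 4) for _ in range(3))
d = (im * s).sum(1, keepdim=True)  # diag(im @ s.t())
knn_d = (knn_im * s).sum(1, keepdim=True)  # diag(knn_im @ s.t())
torch.testing.assert_close(loss_fn(im, knn_im, s), (knn_d - d).clamp(min=0))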
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from torch.autograd import * import torch.nn.init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_clamp_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 5 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 5 * x0, xmask, eviction_policy='evict_last') tmp1 = 0.0 tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = triton_helpers.maximum(tmp4, tmp1) tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(arg2_1, reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf0) del arg2_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(arg0_1, reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf1) del arg0_1 del arg1_1 buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_clamp_sub_0[grid(4)](buf0, buf1, buf2, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf0 del buf1 return buf2, def cosine_sim(im, s): """Cosine similarity between all the image and sentence pairs """ return im.mm(s.t()) def order_sim(im, s): """Order embeddings similarity measure $max(0, s-im)$ """ YmX = s.unsqueeze(1).expand(s.size(0), im.size(0), s.size(1) ) - im.unsqueeze(0).expand(s.size(0), im.size(0), s.size(1)) score = -YmX.clamp(min=0).pow(2).sum(2).sqrt().t() return score class knn_ContrastiveLossNew(nn.Module): """ Compute contrastive loss """ def __init__(self, margin=0, measure=False): super(knn_ContrastiveLossNew, self).__init__() self.margin = margin if measure == 'order': self.sim = order_sim else: self.sim = cosine_sim def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_2 arg2_1 = input_1 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
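# The 5 * x0 load offsets in the fused kernel above are a diagonal trick:
# for a contiguous (4, 4) matrix, element (i, i) sits at flat index
# i * 4 + i = 5 * i, so the kernel reads only the diagonals of the two
# mm results. A one-line illustration:
import torch

m = torch.arange(16.0).view(4, 4)
assert torch.equal(m.flatten()[::5], m.diag())  # flat stride 5 == diagonal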
WuJie1010/Fine-Grained-Image-Captioning
knn_ContrastiveLoss
false
18,071
[ "MIT" ]
9
340bc1868634f3bf0fdd62d439fec32ee1b45407
https://github.com/WuJie1010/Fine-Grained-Image-Captioning/tree/340bc1868634f3bf0fdd62d439fec32ee1b45407
HuberLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/zd/czdy2rdwjzwjkrxa3uneax6ph26777yzaefdhz6ayvv6pkkk54mp.py # Topologically Sorted Source Nodes: [truediv, truediv_1, loss, mul, mul_1], Original ATen: [aten.div, aten.smooth_l1_loss, aten.mul] # Source node to ATen node mapping: # loss => abs_1, div_2, lt, mean, mul, pow_1, sub, sub_1, where # mul => mul_1 # mul_1 => mul_2 # truediv => div # truediv_1 => div_1 # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, 1), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg1_1, 1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %div_1), kwargs = {}) # %abs_1 : [num_users=3] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_1, 1.0), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.5), kwargs = {}) # %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 1.0), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_1, 0.5), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %div_2, %sub_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%where,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, 1), kwargs = {}) triton_per_fused_div_mul_smooth_l1_loss_0 = async_compile.triton('triton_per_fused_div_mul_smooth_l1_loss_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, 
TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mul_smooth_l1_loss_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_mul_smooth_l1_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp5 = tmp2 - tmp4 tmp6 = tl_math.abs(tmp5) tmp7 = tmp6 < tmp1 tmp8 = tmp6 * tmp6 tmp9 = 0.5 tmp10 = tmp8 * tmp9 tmp11 = tmp10 * tmp1 tmp12 = tmp6 - tmp9 tmp13 = tl.where(tmp7, tmp11, tmp12) tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = 256.0 tmp18 = tmp16 / tmp17 tmp19 = tmp18 * tmp1 tmp20 = tmp19 * tmp1 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp20, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [truediv, truediv_1, loss, mul, mul_1], Original ATen: [aten.div, aten.smooth_l1_loss, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_div_mul_smooth_l1_loss_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class HuberLoss(nn.Module):

    def __init__(self, delta=1):
        super().__init__()
        self.huber_loss_delta1 = nn.SmoothL1Loss()
        self.delta = delta

    def forward(self, x, x_hat):
        loss = self.huber_loss_delta1(x / self.delta, x_hat / self.delta)
        return loss * self.delta * self.delta


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
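Why the delta scaling is equivalent to a true Huber loss: SmoothL1Loss with its default beta of 1 charges 0.5 * e**2 for |e| < 1 and |e| - 0.5 otherwise, so feeding it x/delta and x_hat/delta and rescaling by delta**2 yields 0.5 * e**2 for |e| < delta and delta * (|e| - 0.5 * delta) otherwise, which is exactly the Huber loss with threshold delta. A minimal parity-check sketch, assuming the HuberLoss class above is in scope and a PyTorch version >= 1.9 where nn.HuberLoss exists:

import torch
from torch import nn

x, y = torch.randn(4, 4, 4, 4), torch.randn(4, 4, 4, 4)
for delta in (0.5, 1.0, 2.0):
    ours = HuberLoss(delta)(x, y)          # scaled SmoothL1Loss, as above
    ref = nn.HuberLoss(delta=delta)(x, y)  # built-in Huber loss
    assert torch.allclose(ours, ref, atol=1e-6)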
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_div_mul_smooth_l1_loss_0(in_out_ptr0, in_ptr0, in_ptr1,
        xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp5 = tmp2 - tmp4
    tmp6 = tl_math.abs(tmp5)
    tmp7 = tmp6 < tmp1
    tmp8 = tmp6 * tmp6
    tmp9 = 0.5
    tmp10 = tmp8 * tmp9
    tmp11 = tmp10 * tmp1
    tmp12 = tmp6 - tmp9
    tmp13 = tl.where(tmp7, tmp11, tmp12)
    tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
    tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
    tmp17 = 256.0
    tmp18 = tmp16 / tmp17
    tmp19 = tmp18 * tmp1
    tmp20 = tmp19 * tmp1
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_div_mul_smooth_l1_loss_0[grid(1)](buf1, arg0_1,
            arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class HuberLossNew(nn.Module):

    def __init__(self, delta=1):
        super().__init__()
        self.huber_loss_delta1 = nn.SmoothL1Loss()
        self.delta = delta

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
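Note that tracing baked the default delta=1 into the kernel: every multiplication by the scalar tmp1 = 1.0 is a folded delta (or beta) term, so the compiled HuberLossNew only matches the eager module for delta=1 even though it still stores self.delta. A quick smoke test of the fused path, assuming a CUDA device and both classes above in scope:

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    y = torch.rand(4, 4, 4, 4, device='cuda')
    # eager vs. single fused persistent-reduction kernel (delta=1 only)
    assert torch.allclose(HuberLoss()(x, y), HuberLossNew()(x, y))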
UT-Austin-RPL/maple
HuberLoss
false
18072
[ "MIT" ]
9
aef9fe9869945df5bbd1b02fd40813aac135cf5a
https://github.com/UT-Austin-RPL/maple/tree/aef9fe9869945df5bbd1b02fd40813aac135cf5a
Conv1dSamePadding
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/ez/cez2jzexmb4sge6ppw5ybppxmrq4agzqvub7dwp2b4oiuokyfs3y.py # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # pad => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [1, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 24 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) x2 = xindex tmp0 = (-1) + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 
tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + ((-1) + x0 + (4*x1)), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/sg/csgq5ftjyp6tfkdvqut5tb3vhshsnitw4abeanxhrswll5a6udh3.py # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv1d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 3) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6), (6, 1), torch.float32) # Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 24, grid=grid(24), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 6), (0, 6, 1), 0), primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 3), (12, 3, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] 
triton_poi_fused_convolution_1.run(buf2, primals_3, 12, grid=grid(12), stream=stream0) del primals_3 return (reinterpret_tensor(buf2, (4, 3), (3, 1), 0), primals_2, reinterpret_tensor(buf0, (1, 4, 6), (24, 6, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.nn as nn


class Conv1dSamePadding(nn.Conv1d):
    """
    1D convolutional layer with "same" padding (no downsampling),
    that is also compatible with strides > 1
    """

    def __init__(self, *args, **kwargs):
        super(Conv1dSamePadding, self).__init__(*args, **kwargs)

    def forward(self, inputs):
        """
        Given an input of size [B, CI, WI], return an output [B, CO, WO],
        where WO = [WI + 2P - K - (K - 1) * (D - 1)] / S + 1,
        by computing P on-the-fly at forward time

        B: batch size
        CI: input channels
        WI: input width
        CO: output channels
        WO: output width
        P: padding
        K: kernel size
        D: dilation
        S: stride
        """
        padding = (self.stride[0] * (inputs.shape[-1] - 1) -
            inputs.shape[-1] + self.kernel_size[0] +
            (self.dilation[0] - 1) * (self.kernel_size[0] - 1)) // 2
        return self._conv_forward(F.pad(inputs, (padding, padding)),
            self.weight, self.bias)


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
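One subtlety worth flagging: the integer division floors the padding, so with stride 1 the output width truly matches the input only when D * (K - 1) is even; for odd values the output comes up one sample short, as in the traced example here, where kernel_size=4 maps width 4 to 3. A minimal usage sketch under those assumptions, with the class above in scope:

import torch

conv3 = Conv1dSamePadding(in_channels=4, out_channels=4, kernel_size=3)
assert conv3(torch.rand(2, 4, 10)).shape == (2, 4, 10)  # true "same"

conv4 = Conv1dSamePadding(in_channels=4, out_channels=4, kernel_size=4)
assert conv4(torch.rand(2, 4, 10)).shape == (2, 4, 9)   # floored padding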
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 24
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 6
    x1 = xindex // 6
    x2 = xindex
    tmp0 = -1 + x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1), tmp5 & xmask, other=0.0)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 12
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 3
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 6), (6, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_constant_pad_nd_0[grid(24)](primals_1, buf0, 24,
            XBLOCK=32, num_warps=1, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(reinterpret_tensor(buf0,
            (1, 4, 6), (0, 6, 1), 0), primals_2, stride=(1,), padding=(0,),
            dilation=(1,), transposed=False, output_padding=(0,), groups=1,
            bias=None)
        assert_size_stride(buf1, (1, 4, 3), (12, 3, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(12)](buf2, primals_3, 12,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_3
    return (reinterpret_tensor(buf2, (4, 3), (3, 1), 0), primals_2,
        reinterpret_tensor(buf0, (1, 4, 6), (24, 6, 1), 0))


class Conv1dSamePaddingNew(nn.Conv1d):
    """
    1D convolutional layer with "same" padding (no downsampling),
    that is also compatible with strides > 1
    """

    def __init__(self, *args, **kwargs):
        super(Conv1dSamePaddingNew, self).__init__(*args, **kwargs)

    def forward(self, input_0):
        primals_2 = self.weight
        primals_3 = self.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
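The compiled wrapper is specialized to the traced call: assert_size_stride pins the input to an unbatched (4, 4) tensor, the pad kernel hardcodes one zero on each side, and the convolution metadata is fixed, so Conv1dSamePaddingNew only reproduces the eager module at that shape. A hypothetical parity check under those assumptions (CUDA device, both classes above in scope):

import torch

if torch.cuda.is_available():
    eager = Conv1dSamePadding(4, 4, 4).cuda()
    fused = Conv1dSamePaddingNew(4, 4, 4).cuda()
    fused.load_state_dict(eager.state_dict())  # same weight/bias layout
    x = torch.rand(4, 4, device='cuda')        # the traced (4, 4) shape
    assert torch.allclose(eager(x), fused(x), atol=1e-5)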
Wadaboa/titanet
Conv1dSamePadding
false
18073
[ "MIT" ]
4
b07e3074e79ea8c1129fb0adb8315e06bb4943ea
https://github.com/Wadaboa/titanet/tree/b07e3074e79ea8c1129fb0adb8315e06bb4943ea
Wang
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/mv/cmv2uso5mqqd2odadbf5hlzfwj23scjlx7gtzjfpkhwehnnuswxt.py # Topologically Sorted Source Nodes: [rel_coeffs, mul, sum_1, ret], Original ATen: [aten.index, aten.mul, aten.sum, aten.add] # Source node to ATen node mapping: # mul => mul # rel_coeffs => index # ret => add # sum_1 => sum_1 # Graph fragment: # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%primals_1, [%primals_2]), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index, %primals_3), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %primals_4), kwargs = {}) triton_poi_fused_add_index_mul_sum_0 = async_compile.triton('triton_poi_fused_add_index_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_index_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 
256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_index_mul_sum_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 4 x2 = (xindex // 16) x3 = xindex % 16 x4 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr2 + (x3 + (64*x2)), xmask) tmp9 = tl.load(in_ptr2 + (16 + x3 + (64*x2)), xmask) tmp12 = tl.load(in_ptr2 + (32 + x3 + (64*x2)), xmask) tmp15 = tl.load(in_ptr2 + (48 + x3 + (64*x2)), xmask) tmp18 = tl.load(in_ptr3 + (0)) tmp19 = tl.broadcast_to(tmp18, [XBLOCK]) tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4") tmp6 = tl.load(in_ptr1 + (tmp4), xmask, eviction_policy='evict_last') tmp8 = tmp6 * tmp7 tmp10 = tmp6 * tmp9 tmp11 = tmp8 + tmp10 tmp13 = tmp6 * tmp12 tmp14 = tmp11 + tmp13 tmp16 = tmp6 * tmp15 tmp17 = tmp14 + tmp16 tmp20 = tmp17 + tmp19 tl.store(out_ptr0 + (x4), tmp20, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/a5/ca56rgpdjbilaspdaau44lnrilsxekhmcnbwzhtrcgfbilkik27p.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = 
tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/xl/cxls6dl5dz3ua4ilno7rjcfd6m7p4ydnd3mzfaq2cepnph6e2y7h.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => div, sum_2 # Graph fragment: # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_2), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 1), (1, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (), ()) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [rel_coeffs, mul, sum_1, ret], Original ATen: [aten.index, aten.mul, aten.sum, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_index_mul_sum_0.run(primals_2, primals_1, primals_3, primals_4, buf0, 64, grid=grid(64), stream=stream0) del primals_1 del primals_4 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf1, buf2, 64, grid=grid(64), stream=stream0) del buf1 return (buf2, primals_2, primals_3, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((), (), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F


class Wang(nn.Module):
    """Neural network model for linear combination of EDU scores.

    """

    def __init__(self, nrels):
        """Class constructor.

        Args:
          nrels (int): total number of relations

        """
        super(Wang, self).__init__()
        d = np.ones((nrels, 1), dtype=np.float32)
        d[0] = 0
        self.d = nn.Parameter(torch.tensor(d))
        self.b = nn.Parameter(torch.tensor(0.5))

    def forward(self, rel_indices, x):
        rel_coeffs = self.d[rel_indices]
        ret = torch.sum(rel_coeffs * x, dim=1) + self.b
        return F.softmax(ret, dim=-1)


def get_inputs():
    return [torch.ones([4], dtype=torch.int64), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'nrels': 4}]
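The d[0] = 0 initialization zeroes the coefficient of relation 0, so at init any EDU attached with that relation contributes nothing: the scores collapse to the bias alone and the softmax is uniform. A minimal sketch demonstrating this, assuming the Wang class above is in scope:

import torch

model = Wang(nrels=4)
x = torch.rand(4, 4, 4, 4)
out = model(torch.zeros(4, dtype=torch.int64), x)  # all relation-0 indices
assert out.shape == (4, 4, 4)
# constant scores over the softmax dim of size 4 -> 0.25 everywhere
assert torch.allclose(out, torch.full_like(out, 0.25))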
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_index_mul_sum_0(in_ptr0, in_ptr1, in_ptr2,
        in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4 % 4
    x2 = xindex // 16
    x3 = xindex % 16
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr2 + (x3 + 64 * x2), xmask)
    tmp9 = tl.load(in_ptr2 + (16 + x3 + 64 * x2), xmask)
    tmp12 = tl.load(in_ptr2 + (32 + x3 + 64 * x2), xmask)
    tmp15 = tl.load(in_ptr2 + (48 + x3 + 64 * x2), xmask)
    tmp18 = tl.load(in_ptr3 + 0)
    tmp19 = tl.broadcast_to(tmp18, [XBLOCK])
    tmp1 = tl.full([XBLOCK], 4, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask,
        'index out of bounds: 0 <= tmp4 < 4')
    tmp6 = tl.load(in_ptr1 + tmp4, xmask, eviction_policy='evict_last')
    tmp8 = tmp6 * tmp7
    tmp10 = tmp6 * tmp9
    tmp11 = tmp8 + tmp10
    tmp13 = tmp6 * tmp12
    tmp14 = tmp11 + tmp13
    tmp16 = tmp6 * tmp15
    tmp17 = tmp14 + tmp16
    tmp20 = tmp17 + tmp19
    tl.store(out_ptr0 + x4, tmp20, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 1), (1, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (), ())
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_index_mul_sum_0[grid(64)](primals_2,
            primals_1, primals_3, primals_4, buf0, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_1
        del primals_4
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(64)](buf0, buf1, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf2 = buf0
        del buf0
        triton_poi_fused__softmax_2[grid(64)](buf1, buf2, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf1
    return buf2, primals_2, primals_3, buf2


class WangNew(nn.Module):
    """Neural network model for linear combination of EDU scores.

    """

    def __init__(self, nrels):
        """Class constructor.

        Args:
          nrels (int): total number of relations

        """
        super(WangNew, self).__init__()
        d = np.ones((nrels, 1), dtype=np.float32)
        d[0] = 0
        self.d = nn.Parameter(torch.tensor(d))
        self.b = nn.Parameter(torch.tensor(0.5))

    def forward(self, input_0, input_1):
        primals_1 = self.d
        primals_4 = self.b
        primals_2 = input_0
        primals_3 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
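Here the index lookup, broadcasted multiply, dim-1 reduction, and bias add all land in a single launch (with a device-side bounds assert on rel_indices), followed by the usual two-kernel max-shifted softmax. A hedged parity check against the eager module, assuming a CUDA device and both classes above in scope:

import torch

if torch.cuda.is_available():
    eager, fused = Wang(nrels=4).cuda(), WangNew(nrels=4).cuda()
    fused.load_state_dict(eager.state_dict())   # shared d and b parameters
    idx = torch.randint(0, 4, (4,), device='cuda')
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(eager(idx, x), fused(idx, x), atol=1e-6)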
WladimirSidorenko/DASA
Wang
false
18074
[ "MIT" ]
7
618d9060a5fd6f567628c8dec5e26943c8c49ad4
https://github.com/WladimirSidorenko/DASA/tree/618d9060a5fd6f567628c8dec5e26943c8c49ad4
AdditiveAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/62/c62gzr2lrxaylngalcsaygcsiowvpaag6jlmfr5cb4v4giqgv7en.py # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] # Source node to ATen node mapping: # linear => mm # Graph fragment: # %mm : [num_users=1] = call_function[target=torch.ops.aten.mm.default](args = (%unsqueeze, %permute), kwargs = {}) triton_poi_fused_mm_0 = async_compile.triton('triton_poi_fused_mm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/5p/c5pwolrv5gmjz4ovo2s5aca5tr3qktekejcqs3zr2kelj5w5nels.py # Topologically 
Sorted Source Nodes: [linear_9], Original ATen: [aten.mm] # Source node to ATen node mapping: # linear_9 => mm_9 # Graph fragment: # %mm_9 : [num_users=1] = call_function[target=torch.ops.aten.mm.default](args = (%unsqueeze_3, %permute), kwargs = {}) triton_poi_fused_mm_1 = async_compile.triton('triton_poi_fused_mm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mm_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/h3/ch3hfjk74auqpkgdecqhggmnitpr7k3rkcag7qe7c4icrybbysew.py # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.mm] # Source node to ATen node mapping: # linear_3 => mm_3 # Graph fragment: # %mm_3 : [num_users=1] = call_function[target=torch.ops.aten.mm.default](args = (%unsqueeze_1, %permute), kwargs = {}) triton_poi_fused_mm_2 = async_compile.triton('triton_poi_fused_mm_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': 
True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mm_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/lw/clwtvso4agplcn6ght7r2jc6fyek23jejjn3ohlqngg3ngunug6e.py # Topologically Sorted Source Nodes: [linear_6], Original ATen: [aten.mm] # Source node to ATen node mapping: # linear_6 => mm_6 # Graph fragment: # %mm_6 : [num_users=1] = call_function[target=torch.ops.aten.mm.default](args = (%unsqueeze_2, %permute), kwargs = {}) triton_poi_fused_mm_3 = async_compile.triton('triton_poi_fused_mm_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mm_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/7c/c7cwgr5bf4tk7v3glfsdblvnbxaozjelwhqlczk23lo24wqjgszu.py # Topologically Sorted Source Nodes: [add, tanh, add_1, tanh_1, add_2, tanh_2, add_3, tanh_3], Original ATen: [aten.add, aten.tanh] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # add_3 => add_3 # tanh => tanh # tanh_1 => tanh_1 # tanh_2 => tanh_2 # tanh_3 => tanh_3 # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze, %mm_1), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {}) # %add_1 : [num_users=1] = 
call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze_1, %mm_1), kwargs = {}) # %tanh_1 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze_2, %mm_1), kwargs = {}) # %tanh_2 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_2,), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze_3, %mm_1), kwargs = {}) # %tanh_3 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_3,), kwargs = {}) triton_poi_fused_add_tanh_4 = async_compile.triton('triton_poi_fused_add_tanh_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_tanh_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask) tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tmp5 = tmp4 + tmp1 tmp6 = libdevice.tanh(tmp5) tmp8 = tmp7 + tmp1 tmp9 = libdevice.tanh(tmp8) tmp11 = tmp10 + tmp1 tmp12 = libdevice.tanh(tmp11) tl.store(out_ptr0 + (x2), tmp3, xmask) tl.store(out_ptr1 + (x2), tmp6, xmask) tl.store(out_ptr2 + (x2), tmp9, xmask) tl.store(out_ptr3 + (x2), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/hy/chynx6iz3lpfibcn7ilyxquse2fdbjb5w576ohjk2c3pyew5hyfp.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args 
= (%cat_mm, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%cat_mm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_5 = async_compile.triton('triton_poi_fused__softmax_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/fx/cfxtgcfwu67bbvvmkzhtnoxet57hymlbr72xf6wer2hxivssjgeu.py # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] # Source node to ATen node mapping: # softmax => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_6 = async_compile.triton('triton_poi_fused__softmax_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, 
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/7a/c7ajfw564lbyj4xawangkalkkupqja75fawoho7rhpnudaj2ph4y.py # Topologically Sorted Source Nodes: [mul, final_context_vec], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # final_context_vec => sum_2 # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_4, %primals_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_mul_sum_7 = async_compile.triton('triton_poi_fused_mul_sum_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 
'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sum_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + (x2), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] stream0 = get_raw_stream(0) triton_poi_fused_mm_0.run(primals_1, buf0, 4, grid=grid(4), stream=stream0) buf1 = empty_strided_cuda((1, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(primals_4, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2) del primals_3 buf10 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [linear_9], Original ATen: [aten.mm] triton_poi_fused_mm_1.run(primals_1, buf10, 4, grid=grid(4), stream=stream0) buf11 = empty_strided_cuda((1, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_9], Original ATen: [aten.mm] extern_kernels.mm(buf10, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf11) buf4 = buf10; del buf10 # reuse # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.mm] triton_poi_fused_mm_2.run(primals_1, buf4, 4, grid=grid(4), stream=stream0) buf5 = empty_strided_cuda((1, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.mm] extern_kernels.mm(buf4, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf5) buf7 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [linear_6], Original ATen: [aten.mm] triton_poi_fused_mm_3.run(primals_1, buf7, 4, grid=grid(4), stream=stream0) buf8 = empty_strided_cuda((1, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_6], Original ATen: [aten.mm] extern_kernels.mm(buf7, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf8) del buf7 del primals_2 buf3 = empty_strided_cuda((4, 4), (4, 1), 
torch.float32) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, tanh, add_1, tanh_1, add_2, tanh_2, add_3, tanh_3], Original ATen: [aten.add, aten.tanh] triton_poi_fused_add_tanh_4.run(buf1, buf2, buf5, buf8, buf11, buf3, buf6, buf9, buf12, 16, grid=grid(16), stream=stream0) del buf1 del buf11 del buf5 del buf8 buf17 = buf2; del buf2 # reuse buf13 = reinterpret_tensor(buf17, (4, 1), (4, 1), 0) # alias # Topologically Sorted Source Nodes: [score_vec], Original ATen: [aten.cat] extern_kernels.mm(buf3, reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf13) buf14 = reinterpret_tensor(buf17, (4, 1), (4, 1), 1) # alias # Topologically Sorted Source Nodes: [score_vec], Original ATen: [aten.cat] extern_kernels.mm(buf6, reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf14) buf15 = reinterpret_tensor(buf17, (4, 1), (4, 1), 2) # alias # Topologically Sorted Source Nodes: [score_vec], Original ATen: [aten.cat] extern_kernels.mm(buf9, reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf15) buf16 = reinterpret_tensor(buf17, (4, 1), (4, 1), 3) # alias # Topologically Sorted Source Nodes: [score_vec], Original ATen: [aten.cat] extern_kernels.mm(buf12, reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf16) buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_5.run(buf17, buf18, 16, grid=grid(16), stream=stream0) del buf13 del buf14 del buf15 del buf16 buf19 = buf17; del buf17 # reuse # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax] triton_poi_fused__softmax_6.run(buf18, buf19, 16, grid=grid(16), stream=stream0) buf20 = buf18; del buf18 # reuse # Topologically Sorted Source Nodes: [mul, final_context_vec], Original ATen: [aten.mul, aten.sum] triton_poi_fused_mul_sum_7.run(buf19, primals_1, buf20, 16, grid=grid(16), stream=stream0) return (buf20, reinterpret_tensor(buf19, (4, 4, 1), (4, 1, 1), 0), primals_1, primals_4, reinterpret_tensor(primals_1, (1, 4), (16, 4), 0), buf3, reinterpret_tensor(primals_1, (1, 4), (16, 4), 1), buf6, reinterpret_tensor(primals_1, (1, 4), (16, 4), 2), buf9, reinterpret_tensor(primals_1, (1, 4), (16, 4), 3), buf12, buf19, primals_5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class AdditiveAttention(nn.Module):
    """Bahdanau-style additive attention: score(e, d) = v^T tanh(W1 e + W2 d)."""

    def __init__(self, encoder_hidden_state_dim, decoder_hidden_state_dim,
                 internal_dim=None):
        super(AdditiveAttention, self).__init__()
        if internal_dim is None:
            internal_dim = int((encoder_hidden_state_dim +
                                decoder_hidden_state_dim) / 2)
        self.w1 = nn.Linear(encoder_hidden_state_dim, internal_dim, bias=False)
        self.w2 = nn.Linear(decoder_hidden_state_dim, internal_dim, bias=False)
        self.v = nn.Linear(internal_dim, 1, bias=False)

    def score(self, encoder_state, decoder_state):
        return self.v(torch.tanh(self.w1(encoder_state) +
                                 self.w2(decoder_state)))

    def forward(self, encoder_states, decoder_state):
        # Score each encoder position against the decoder state, softmax over
        # positions, then return the attention-weighted context vector.
        score_vec = torch.cat([self.score(encoder_states[:, i], decoder_state)
                               for i in range(encoder_states.shape[1])], dim=1)
        attention_probs = torch.unsqueeze(F.softmax(score_vec, dim=1), dim=2)
        final_context_vec = torch.sum(attention_probs * encoder_states, dim=1)
        return final_context_vec, attention_probs


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'encoder_hidden_state_dim': 4, 'decoder_hidden_state_dim': 4}]
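A minimal usage sketch (not part of the dataset entry; names and shapes are illustrative) with the three-dimensional encoder layout the forward loop implies:

import torch

attn = AdditiveAttention(encoder_hidden_state_dim=4, decoder_hidden_state_dim=4)
encoder_states = torch.rand(2, 5, 4)  # [batch, src_seq_len, enc_dim]
decoder_state = torch.rand(2, 4)      # [batch, dec_dim]
context, probs = attn(encoder_states, decoder_state)
print(context.shape)  # torch.Size([2, 4]); attention-weighted sum over time
print(probs.shape)    # torch.Size([2, 5, 1]); weights sum to 1 along dim 1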
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mm_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_mm_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_mm_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_mm_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_add_tanh_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tmp5 = tmp4 + tmp1 tmp6 = libdevice.tanh(tmp5) tmp8 = tmp7 + tmp1 tmp9 = libdevice.tanh(tmp8) tmp11 = tmp10 + tmp1 tmp12 = libdevice.tanh(tmp11) tl.store(out_ptr0 + x2, tmp3, xmask) tl.store(out_ptr1 + x2, tmp6, xmask) tl.store(out_ptr2 + x2, tmp9, xmask) tl.store(out_ptr3 + x2, tmp12, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 
= tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_mul_sum_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mm_0[grid(4)](primals_1, buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((1, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (4, 4), (1, 4 ), 0), out=buf1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_4, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2) del primals_3 buf10 = buf0 del buf0 triton_poi_fused_mm_1[grid(4)](primals_1, buf10, 4, XBLOCK=4, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((1, 4), (4, 1), torch.float32) extern_kernels.mm(buf10, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf11) buf4 = buf10 del buf10 triton_poi_fused_mm_2[grid(4)](primals_1, buf4, 4, XBLOCK=4, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((1, 4), (4, 1), torch.float32) extern_kernels.mm(buf4, reinterpret_tensor(primals_2, (4, 4), (1, 4 ), 0), out=buf5) buf7 = buf4 del buf4 triton_poi_fused_mm_3[grid(4)](primals_1, buf7, 4, XBLOCK=4, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((1, 4), (4, 1), torch.float32) extern_kernels.mm(buf7, reinterpret_tensor(primals_2, (4, 4), (1, 4 ), 0), out=buf8) del buf7 del primals_2 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf9 = empty_strided_cuda((4, 4), (4, 1), 
torch.float32) buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_tanh_4[grid(16)](buf1, buf2, buf5, buf8, buf11, buf3, buf6, buf9, buf12, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf1 del buf11 del buf5 del buf8 buf17 = buf2 del buf2 buf13 = reinterpret_tensor(buf17, (4, 1), (4, 1), 0) extern_kernels.mm(buf3, reinterpret_tensor(primals_5, (4, 1), (1, 4 ), 0), out=buf13) buf14 = reinterpret_tensor(buf17, (4, 1), (4, 1), 1) extern_kernels.mm(buf6, reinterpret_tensor(primals_5, (4, 1), (1, 4 ), 0), out=buf14) buf15 = reinterpret_tensor(buf17, (4, 1), (4, 1), 2) extern_kernels.mm(buf9, reinterpret_tensor(primals_5, (4, 1), (1, 4 ), 0), out=buf15) buf16 = reinterpret_tensor(buf17, (4, 1), (4, 1), 3) extern_kernels.mm(buf12, reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf16) buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_5[grid(16)](buf17, buf18, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf13 del buf14 del buf15 del buf16 buf19 = buf17 del buf17 triton_poi_fused__softmax_6[grid(16)](buf18, buf19, 16, XBLOCK=16, num_warps=1, num_stages=1) buf20 = buf18 del buf18 triton_poi_fused_mul_sum_7[grid(16)](buf19, primals_1, buf20, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf20, reinterpret_tensor(buf19, (4, 4, 1), (4, 1, 1), 0 ), primals_1, primals_4, reinterpret_tensor(primals_1, (1, 4), (16, 4), 0), buf3, reinterpret_tensor(primals_1, (1, 4), (16, 4), 1 ), buf6, reinterpret_tensor(primals_1, (1, 4), (16, 4), 2 ), buf9, reinterpret_tensor(primals_1, (1, 4), (16, 4), 3 ), buf12, buf19, primals_5 class AdditiveAttentionNew(nn.Module): def __init__(self, encoder_hidden_state_dim, decoder_hidden_state_dim, internal_dim=None): super(AdditiveAttentionNew, self).__init__() if internal_dim is None: internal_dim = int((encoder_hidden_state_dim + decoder_hidden_state_dim) / 2) self.w1 = nn.Linear(encoder_hidden_state_dim, internal_dim, bias=False) self.w2 = nn.Linear(decoder_hidden_state_dim, internal_dim, bias=False) self.v = nn.Linear(internal_dim, 1, bias=False) def score(self, encoder_state, decoder_state): return self.v(torch.tanh(self.w1(encoder_state) + self.w2( decoder_state))) def forward(self, input_0, input_1): primals_2 = self.w1.weight primals_3 = self.w2.weight primals_5 = self.v.weight primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
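A hedged sanity-check sketch (assumes a CUDA device; not part of the dataset entry). With input_0/input_1 bound to the encoder and decoder primals and the weights bound to the remaining slots, as the slicing and weighted-sum kernels in call() require, the compiled wrapper should agree with the eager module on the (4, 4) shapes the asserts demand:

import torch

eager = AdditiveAttention(4, 4).cuda()
compiled = AdditiveAttentionNew(4, 4).cuda()
compiled.load_state_dict(eager.state_dict())  # identical weights in both
enc = torch.rand(4, 4, device='cuda')
dec = torch.rand(4, 4, device='cuda')
ctx_e, probs_e = eager(enc, dec)
ctx_c, probs_c = compiled(enc, dec)
print(torch.allclose(ctx_e, ctx_c, atol=1e-6),
      torch.allclose(probs_e, probs_c, atol=1e-6))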
Vision-CAIR/HalentNet
AdditiveAttention
false
18,075
[ "MIT" ]
4
dedef73c57c63aa580fc497fa42d512f4241a64b
https://github.com/Vision-CAIR/HalentNet/tree/dedef73c57c63aa580fc497fa42d512f4241a64b
SVM
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/4w/c4w3qjo5ignni66fjxe2qbiwiluy2kt3ru45stogsoyxgzsx7fbu.py # Topologically Sorted Source Nodes: [y], Original ATen: [aten.sigmoid, aten.sigmoid_backward] # Source node to ATen node mapping: # y => sigmoid # Graph fragment: # %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %sub), kwargs = {}) triton_poi_fused_sigmoid_sigmoid_backward_0 = async_compile.triton('triton_poi_fused_sigmoid_sigmoid_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_sigmoid_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_sigmoid_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp4 tmp7 = tmp4 * tmp6 tl.store(in_out_ptr0 + (x0), tmp4, xmask) tl.store(out_ptr0 + (x0), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf0 # reuse buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [y], Original ATen: [aten.sigmoid, aten.sigmoid_backward] stream0 = get_raw_stream(0) triton_poi_fused_sigmoid_sigmoid_backward_0.run(buf1, primals_2, buf2, 64, grid=grid(64), stream=stream0) del primals_2 return (reinterpret_tensor(buf1, (64, ), (1, ), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class SVM(nn.Module):
    # Note: despite the name, this is a single linear unit with a sigmoid
    # output (a logistic head), not a hinge-loss SVM.

    def __init__(self, hidden_size):
        super(SVM, self).__init__()
        self.linear1 = nn.Linear(hidden_size, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Flatten all leading dimensions into one vector of probabilities.
        y = self.sigmoid(self.linear1(x))
        return y.view(-1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'hidden_size': 4}]
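A minimal usage sketch (illustrative, shapes matching get_inputs): every leading dimension is flattened into one vector of sigmoid outputs:

import torch

model = SVM(hidden_size=4)
x = torch.rand(4, 4, 4, 4)
y = model(x)
print(y.shape)                         # torch.Size([64])
print(float(y.min()), float(y.max()))  # strictly inside (0, 1)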
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sigmoid_sigmoid_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tmp5 = 1.0 tmp6 = tmp5 - tmp4 tmp7 = tmp4 * tmp6 tl.store(in_out_ptr0 + x0, tmp4, xmask) tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (1, 4), (4, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sigmoid_sigmoid_backward_0[grid(64)](buf1, primals_2, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (64,), (1,), 0), reinterpret_tensor( primals_3, (64, 4), (4, 1), 0), buf2 class SVMNew(nn.Module): def __init__(self, hidden_size): super(SVMNew, self).__init__() self.linear1 = nn.Linear(hidden_size, 1) self.sigmoid = nn.Sigmoid() def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
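For intuition: the fused kernel above does two things in one pass over the 64 outputs. It finishes the forward sigmoid (bias add plus tl.sigmoid, written back into buf1) and precomputes s * (1 - s), the sigmoid derivative that the saved-for-backward buffer buf2 carries for aten.sigmoid_backward. Restated in plain PyTorch (a sketch for intuition, not the compiled code path; names are illustrative):

import torch

z = torch.randn(64)       # mm output: x @ weight^T, bias not yet added
b = torch.randn(1)
s = torch.sigmoid(z + b)  # what the kernel stores into buf1
ds = s * (1.0 - s)        # what the kernel stores into buf2 for backward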
XIAOYEJIAYOU/GSAN
SVM
false
18,076
[ "MIT" ]
6
8ca4fdf4c3d615af9cc10e1f9f22ceb7e27fe196
https://github.com/XIAOYEJIAYOU/GSAN/tree/8ca4fdf4c3d615af9cc10e1f9f22ceb7e27fe196
MLP
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/ng/cngjwaj32ulse46l45qzlmxoqdt4xu62eznv6tfk7uhocabugjkd.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = 
xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 128), (128, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (1, 128), (128, 1)) assert_size_stride(primals_7, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf5, 8192, grid=grid(8192), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf2) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [value], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf4) del primals_7 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), primals_6, primals_4, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class MLP(nn.Module):
    # Shared 4 -> 128 trunk with separate policy (logits) and value heads.

    def __init__(self, num_actions):
        super(MLP, self).__init__()
        self.fc = nn.Linear(4, 128)
        self.logits = nn.Linear(128, num_actions)
        self.value = nn.Linear(128, 1)

    def forward(self, x):
        x = torch.relu(self.fc(x))
        logits = self.logits(x)
        value = self.value(x)
        return logits, value


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_actions': 4}]
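A hedged usage sketch (not from the repo): the shared trunk with separate logits and value heads is the usual actor-critic layout, and the fixed 4-dim input suggests CartPole-style observations. The Categorical sampling below is illustrative:

import torch

net = MLP(num_actions=2)
obs = torch.rand(8, 4)                          # batch of 4-dim observations
logits, value = net(obs)
dist = torch.distributions.Categorical(logits=logits)
action = dist.sample()
print(logits.shape, value.shape, action.shape)  # (8, 2) (8, 1) (8,)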
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 128), (128, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 128), (128, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1, primals_2, buf5, 8192, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf2) del primals_5 buf4 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 128), (128, 1), 0 ), primals_6, primals_4, buf5 class MLPNew(nn.Module): def __init__(self, num_actions): super(MLPNew, self).__init__() self.fc = nn.Linear(4, 128) self.logits = nn.Linear(128, num_actions) self.value = nn.Linear(128, 1) def forward(self, input_0): primals_1 = self.fc.weight primals_2 = self.fc.bias primals_4 = self.logits.weight primals_5 = self.logits.bias primals_6 = self.value.weight primals_7 = self.value.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0], output[1]
XFFXFF/endorphin
MLP
false
18,077
[ "Apache-2.0" ]
5
a29d6faf76284e5346d900dfd4fdeda82c710744
https://github.com/XFFXFF/endorphin/tree/a29d6faf76284e5346d900dfd4fdeda82c710744
Attention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/bx/cbx7bul76cmzmgnt7imfsql2you2bzhru7krslndn4kftvafejhz.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%repeat, %primals_1], -1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x2 = (xindex // 32) x3 = (xindex // 8) x4 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x2) + x0), tmp4 & 
xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x3) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x4), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/lk/clk2dsvgo4mvyhfspwd3lbypql6ohqa6qtqbil3qeaz57liyqogz.py # Topologically Sorted Source Nodes: [tanh_W_s_h], Original ATen: [aten.tanh] # Source node to ATen node mapping: # tanh_W_s_h => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = libdevice.tanh(tmp0) tl.store(in_out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/wd/cwdcgou5wrlu7uqoc7vbrkfi6bm7v333yqp6vr3jxwiuzpl2ziqk.py # Topologically Sorted Source Nodes: [repeat_1], Original ATen: [aten.repeat] # Source node to ATen node mapping: # repeat_1 => repeat_1 # Graph fragment: # %repeat_1 : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_4, [4, 1]), kwargs = {}) triton_poi_fused_repeat_2 = async_compile.triton('triton_poi_fused_repeat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/qr/cqrq2cibi7axpdkpmk7q2m6dayogfmnbjtd2hrd77sujb5g7rxxx.py # Topologically Sorted Source Nodes: [e_1, att_weights], Original ATen: [aten.add, aten._softmax] # Source node to ATen node mapping: # att_weights => amax, exp, sub, sum_1 # e_1 => add # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_5, %squeeze), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) triton_poi_fused__softmax_add_3 = async_compile.triton('triton_poi_fused__softmax_add_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused__softmax_add_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = triton_helpers.maximum(tmp2, tmp4) tmp7 = tmp6 + tmp1 tmp8 = triton_helpers.maximum(tmp5, tmp7) tmp10 = tmp9 + tmp1 tmp11 = triton_helpers.maximum(tmp8, tmp10) tmp12 = tmp2 - tmp11 tmp13 = tl_math.exp(tmp12) tmp14 = tmp4 - tmp11 tmp15 = tl_math.exp(tmp14) tmp16 = tmp13 + tmp15 tmp17 = tmp7 - tmp11 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp10 - tmp11 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tl.store(out_ptr0 + (x2), tmp11, xmask) tl.store(out_ptr1 + (x2), tmp22, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/qm/cqmltvovjzg6bpcc5h3m6lhqwi44fnt3f6xc7zmpnx6bxtvnifwz.py # Topologically Sorted Source Nodes: [e_1, att_weights], Original ATen: [aten.add, aten._softmax] # Source node to ATen node mapping: # att_weights => amax, div, exp, sub, sum_1 # e_1 => add # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_5, %squeeze), kwargs = {}) # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_add_4 = async_compile.triton('triton_poi_fused__softmax_add_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_add_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tl.store(out_ptr0 + (x3), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_2, primals_1, buf0, 128, grid=grid(128), stream=stream0) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [tanh_W_s_h], Original ATen: [aten.tanh] triton_poi_fused_tanh_1.run(buf2, 64, grid=grid(64), stream=stream0) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [repeat_1], Original ATen: [aten.repeat] triton_poi_fused_repeat_2.run(primals_4, buf3, 16, grid=grid(16), stream=stream0) del primals_4 buf4 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), out=buf4) buf5 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [e_1, att_weights], Original ATen: [aten.add, aten._softmax] triton_poi_fused__softmax_add_3.run(primals_5, buf4, buf5, buf6, 64, grid=grid(64), stream=stream0) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [e_1, att_weights], Original ATen: [aten.add, aten._softmax] triton_poi_fused__softmax_add_4.run(primals_5, buf4, buf5, buf6, buf7, 256, grid=grid(256), stream=stream0) del buf4 del buf5 del buf6 del primals_5 return (buf7, reinterpret_tensor(buf0, (16, 8), (8, 1), 0), buf2, buf7, reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), 
device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class Attention(nn.Module):

    def __init__(self, n_hidden_enc, n_hidden_dec):
        super().__init__()
        self.h_hidden_enc = n_hidden_enc
        self.h_hidden_dec = n_hidden_dec
        self.W = nn.Linear(n_hidden_enc + n_hidden_dec, n_hidden_dec,
                           bias=False)
        self.V = nn.Parameter(torch.rand(n_hidden_dec))

    def forward(self, hidden_dec, last_layer_enc, attention_mask):
        """
        PARAMS:
            hidden_dec: [b, n_hidden_dec]
            last_layer_enc: [b, src_seq_len, n_hidden_enc] (for a
                bidirectional encoder, pass the doubled size as n_hidden_enc)
            attention_mask: additive mask, broadcastable to [b, src_seq_len]
                (e.g. large negative values at positions to suppress)

        RETURN:
            att_weights: [b, src_seq_len]
        """
        batch_size = last_layer_enc.size(0)
        src_seq_len = last_layer_enc.size(1)
        hidden_dec = hidden_dec.unsqueeze(1).repeat(1, src_seq_len, 1)
        tanh_W_s_h = torch.tanh(self.W(torch.cat((hidden_dec,
                                                  last_layer_enc), dim=-1)))
        tanh_W_s_h = tanh_W_s_h.permute(0, 2, 1)
        V = self.V.repeat(batch_size, 1).unsqueeze(1)
        e = torch.bmm(V, tanh_W_s_h).squeeze(1)
        e = attention_mask + e
        att_weights = F.softmax(e, dim=1)
        return att_weights


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4, 4]),
            torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_hidden_enc': 4, 'n_hidden_dec': 4}]
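A minimal usage sketch (names and shapes illustrative, not from the repo): a large negative additive mask is the usual way to suppress padded source positions before the softmax:

import torch

attn = Attention(n_hidden_enc=4, n_hidden_dec=4)
hidden_dec = torch.rand(2, 4)            # [b, n_hidden_dec]
last_layer_enc = torch.rand(2, 5, 4)     # [b, src_seq_len, n_hidden_enc]
mask = torch.zeros(2, 5)
mask[:, 3:] = -1e9                       # treat positions 3..4 as padding
w = attn(hidden_dec, last_layer_enc, mask)
print(w.shape)         # torch.Size([2, 5])
print(w[:, 3:].max())  # ~0: masked positions get ~zero weight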
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x2 = xindex // 32 x3 = xindex // 8 x4 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x2 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x3 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x4, tmp10, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tl.store(in_out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused__softmax_add_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = triton_helpers.maximum(tmp2, tmp4) tmp7 = tmp6 + tmp1 tmp8 = triton_helpers.maximum(tmp5, tmp7) tmp10 = tmp9 + tmp1 tmp11 = triton_helpers.maximum(tmp8, tmp10) tmp12 = tmp2 - tmp11 tmp13 = tl_math.exp(tmp12) tmp14 = tmp4 - tmp11 tmp15 = tl_math.exp(tmp14) tmp16 = tmp13 + tmp15 tmp17 = tmp7 - tmp11 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp10 - tmp11 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tl.store(out_ptr0 + x2, tmp11, xmask) tl.store(out_ptr1 + x2, tmp22, xmask) @triton.jit def triton_poi_fused__softmax_add_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr3 + (x0 + 16 * x2), xmask, eviction_policy= 
'evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tl.store(out_ptr0 + x3, tmp7, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 8), (8, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](primals_2, primals_1, buf0, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1) del primals_3 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused_tanh_1[grid(64)](buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_repeat_2[grid(16)](primals_4, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 buf4 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 0, 1), 0 ), reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), out=buf4) buf5 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) buf6 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32) triton_poi_fused__softmax_add_3[grid(64)](primals_5, buf4, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_add_4[grid(256)](primals_5, buf4, buf5, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf4 del buf5 del buf6 del primals_5 return buf7, reinterpret_tensor(buf0, (16, 8), (8, 1), 0 ), buf2, buf7, reinterpret_tensor(buf3, (4, 4, 1), (4, 1, 4), 0) class AttentionNew(nn.Module): def __init__(self, n_hidden_enc, n_hidden_dec): super().__init__() self.h_hidden_enc = n_hidden_enc self.h_hidden_dec = n_hidden_dec self.W = nn.Linear(n_hidden_enc + n_hidden_dec, n_hidden_dec, bias= False) self.V = nn.Parameter(torch.rand(n_hidden_dec)) def forward(self, input_0, input_1, input_2): primals_4 = self.V primals_3 = self.W.weight primals_2 = input_0 primals_1 = input_1 primals_5 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
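A parity-check sketch, assuming a CUDA device is available (the call() wrapper above pins all buffers to cuda:0). It shares weights between the eager Attention and the Triton-backed AttentionNew and compares their outputs:

import torch

if torch.cuda.is_available():
    eager = Attention(4, 4).cuda()
    fused = AttentionNew(4, 4).cuda()
    fused.load_state_dict(eager.state_dict())  # sync W.weight and V
    h = torch.rand(4, 4, device='cuda')
    enc = torch.rand(4, 4, 4, device='cuda')
    mask = torch.rand(4, 4, 4, 4, device='cuda')
    # Slightly relaxed tolerances: the fused softmax/tanh kernels may not
    # be bitwise identical to the eager ops.
    torch.testing.assert_close(eager(h, enc, mask),
        fused(h, enc, mask), rtol=1e-4, atol=1e-5)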
VisualJoyce/ChengyuBERT
Attention
false
18078
[ "MIT" ]
8
605db3a4b3241dd4d02baa41a68bf23b5b00b36d
https://github.com/VisualJoyce/ChengyuBERT/tree/605db3a4b3241dd4d02baa41a68bf23b5b00b36d
SEModule
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/yt/cytnwjfj2fwh3czq2vl4j7w7fjotaquc3crv6vgeaqdaw77o76vp.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean] # Source node to ATen node mapping: # x => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2, -3], True), kwargs = {}) triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + 
(r1 + (64*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 64.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/5x/c5xvbbrbrcgfjcyatbfkvnueua5ae2wvjxm5atfvp25dbd2xeb7g.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_2 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x0), tmp4, xmask) tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/va/cvahq7bwdf37kayfnung5geylifqe6iggx2yfk65vyjdptqkn76q.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_3 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_1, %primals_4, %primals_5, [1, 1, 1], [0, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, 
triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/za/czalxflwqycvo5d526kdekopuvio357kxbzo7c6fo3fwmp5s6a7k.py # Topologically Sorted Source Nodes: [x_4, mul], Original ATen: [aten.sigmoid, aten.mul] # Source node to ATen node mapping: # mul => mul # x_4 => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%squeeze_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {}) triton_poi_fused_mul_sigmoid_3 = async_compile.triton('triton_poi_fused_mul_sigmoid_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (16, 4, 1, 1, 1), (4, 1, 1, 1, 1)) assert_size_stride(primals_3, (16, ), (1, )) assert_size_stride(primals_4, (4, 16, 1, 1, 1), (16, 1, 1, 1, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_per_fused_mean_0.run(buf1, primals_1, 4, 64, grid=grid(4), stream=stream0) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 1, 1, 1), (0, 1, 0, 0, 0), 0), primals_2, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf2, (1, 16, 1, 1, 1), (16, 1, 1, 1, 1)) buf3 = reinterpret_tensor(buf2, (16, 1, 1, 1), (1, 16, 16, 16), 0); del buf2 # reuse buf7 = empty_strided_cuda((16, 1, 1, 1), (1, 1, 1, 1), torch.bool) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_3, buf7, 16, grid=grid(16), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(reinterpret_tensor(buf3, (1, 16, 1, 1, 1), (0, 1, 0, 0, 0), 0), primals_4, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf4, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf5, primals_5, 4, grid=grid(4), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4, mul], Original ATen: [aten.sigmoid, aten.mul] triton_poi_fused_mul_sigmoid_3.run(primals_1, buf5, buf6, 256, grid=grid(256), stream=stream0) return (buf6, primals_1, primals_2, primals_4, reinterpret_tensor(buf1, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0), reinterpret_tensor(buf3, (1, 16, 1, 1, 1), (16, 1, 1, 1, 1), 0), buf5, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, 4, 1, 1, 1), (4, 1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 16, 1, 1, 1), (16, 1, 1, 1, 
1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class SEModule(nn.Module):

    def __init__(self, channels, reduction):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool3d(1)
        self.bottleneck = self._round_width(channels, reduction)
        self.fc1 = nn.Conv3d(channels, self.bottleneck, kernel_size=1,
            padding=0)
        self.relu = nn.ReLU()
        self.fc2 = nn.Conv3d(self.bottleneck, channels, kernel_size=1,
            padding=0)
        self.sigmoid = nn.Sigmoid()

    @staticmethod
    def _round_width(width, multiplier, min_width=8, divisor=8):
        width *= multiplier
        min_width = min_width or divisor
        width_out = max(min_width, int(width + divisor / 2) // divisor *
            divisor)
        if width_out < 0.9 * width:
            width_out += divisor
        return int(width_out)

    def forward(self, x):
        module_input = x
        x = self.avg_pool(x)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.sigmoid(x)
        return module_input * x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'channels': 4, 'reduction': 4}]
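Worth noting: for the get_init_inputs() configuration the squeeze-excite bottleneck widens rather than narrows, since _round_width(4, 4) evaluates to max(8, int(4 * 4 + 8 / 2) // 8 * 8) = 16. A short illustrative sketch (not part of the row) showing this and the unbatched 4-D forward pass, which works because AdaptiveAvgPool3d and Conv3d both accept (C, D, H, W) input in recent PyTorch:

import torch

se = SEModule(channels=4, reduction=4)
print(se.bottleneck)  # 16 -- wider than the 4 input channels
x = torch.rand(4, 4, 4, 4)
print(se(x).shape)    # torch.Size([4, 4, 4, 4]); the (4,1,1,1) gate broadcasts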
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 64.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x0, tmp4, xmask) tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (16, 4, 1, 1, 1), (4, 1, 1, 1, 1)) assert_size_stride(primals_3, (16,), (1,)) assert_size_stride(primals_4, (4, 16, 1, 1, 1), (16, 1, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(4)](buf1, primals_1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 1, 1, 1), (0, 1, 0, 0, 0), 0), primals_2, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf2, (1, 16, 1, 1, 1), (16, 1, 1, 1, 1)) buf3 = reinterpret_tensor(buf2, (16, 1, 1, 1), (1, 16, 16, 16), 0) del buf2 buf7 = empty_strided_cuda((16, 1, 1, 1), (1, 1, 1, 1), torch.bool) 
triton_poi_fused_relu_threshold_backward_1[grid(16)](buf3, primals_3, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf4 = extern_kernels.convolution(reinterpret_tensor(buf3, (1, 16, 1, 1, 1), (0, 1, 0, 0, 0), 0), primals_4, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf4, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(4)](buf5, primals_5, 4, XBLOCK= 4, num_warps=1, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sigmoid_3[grid(256)](primals_1, buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf6, primals_1, primals_2, primals_4, reinterpret_tensor(buf1, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1), 0), reinterpret_tensor(buf3, (1, 16, 1, 1, 1), (16, 1, 1, 1, 1), 0), buf5, buf7 class SEModuleNew(nn.Module): def __init__(self, channels, reduction): super().__init__() self.avg_pool = nn.AdaptiveAvgPool3d(1) self.bottleneck = self._round_width(channels, reduction) self.fc1 = nn.Conv3d(channels, self.bottleneck, kernel_size=1, padding=0) self.relu = nn.ReLU() self.fc2 = nn.Conv3d(self.bottleneck, channels, kernel_size=1, padding=0) self.sigmoid = nn.Sigmoid() @staticmethod def _round_width(width, multiplier, min_width=8, divisor=8): width *= multiplier min_width = min_width or divisor width_out = max(min_width, int(width + divisor / 2) // divisor * divisor) if width_out < 0.9 * width: width_out += divisor return int(width_out) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
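A parity sketch under the same CUDA assumption as above; SEModuleNew reuses the fc1/fc2 parameters, so sharing the state dict lines the eager and Triton paths up:

import torch

if torch.cuda.is_available():
    eager = SEModule(4, 4).cuda()
    fused = SEModuleNew(4, 4).cuda()
    fused.load_state_dict(eager.state_dict())  # only fc1/fc2 carry weights
    x = torch.rand(4, 4, 4, 4, device='cuda')  # matches assert_size_stride
    torch.testing.assert_close(eager(x), fused(x), rtol=1e-4, atol=1e-5)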
Viditagarwal7479/Video-Swin-Transformer
SEModule
false
18079
[ "Apache-2.0" ]
9
37910ef3141c7b2eef76544f9ec8bdf26ec94c7d
https://github.com/Viditagarwal7479/Video-Swin-Transformer/tree/37910ef3141c7b2eef76544f9ec8bdf26ec94c7d
BertSelfAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/lh/clh4qkas6kymmge5sjkrotgaojywmpy5pjnwzrqo6ugbfmgmcf4n.py # Topologically Sorted Source Nodes: [attention_scores], Original ATen: [aten.clone] # Source node to ATen node mapping: # attention_scores => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, 
None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/w7/cw7ixmddbiougcsosjfpymf64knhp2sq2whp2ma5buej5tzcftzy.py # Topologically Sorted Source Nodes: [attention_scores_1, attention_scores_2], Original ATen: [aten.div, aten.add] # Source node to ATen node mapping: # attention_scores_1 => div # attention_scores_2 => add # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {}) # %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_8), kwargs = {}) triton_poi_fused_add_div_1 = async_compile.triton('triton_poi_fused_add_div_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp3 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/kz/ckzqylporms4fvgcrqg44ypprwpanp6hf222rji24wskr3b44aga.py # Topologically Sorted Source Nodes: [attention_probs], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_probs => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import 
triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/4d/c4dndrlfjcamjfnn3ng5agjc3ahefdgw6jcsnn6hm4ljwpbfbe7h.py # Topologically Sorted Source Nodes: [attention_probs], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention_probs => div_1, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/5k/c5kufnc7mciff7by75wm2btl7xamphqljghinmvgmksxfleox4tp.py # Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # context_layer_1 => clone_4 # Graph fragment: # %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_scores], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf0, primals_2, buf3, 16, 4, grid=grid(16, 4), stream=stream0) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [attention_scores], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_scores], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [attention_scores_1, attention_scores_2], Original ATen: [aten.div, aten.add] triton_poi_fused_add_div_1.run(buf6, primals_8, 256, grid=grid(256), stream=stream0) del primals_8 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_probs], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf6, buf7, 256, grid=grid(256), stream=stream0) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attention_probs], Original ATen: [aten._softmax] triton_poi_fused__softmax_3.run(buf7, buf8, 256, grid=grid(256), stream=stream0) del buf7 buf9 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [context_layer], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf2, 
primals_7, buf9, 16, 4, grid=grid(16, 4), stream=stream0) del primals_7 buf10 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [context_layer], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone] triton_poi_fused_clone_4.run(buf10, buf11, 16, 4, grid=grid(16, 4), stream=stream0) del buf10 return (reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0), buf6, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf8, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config
import math
import torch
from torch import nn


class BertSelfAttention(nn.Module):

    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
                % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.
            num_attention_heads)
        self.all_head_size = (self.num_attention_heads * self.
            attention_head_size)
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.
            attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        attention_scores = torch.matmul(query_layer, key_layer.transpose(
            -1, -2))
        attention_scores = attention_scores / math.sqrt(self.
            attention_head_size)
        attention_scores = attention_scores + attention_mask
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        attention_probs = self.dropout(attention_probs)
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.
            all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        return context_layer, attention_scores


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'config': _mock_config(hidden_size=4,
        num_attention_heads=4, attention_probs_dropout_prob=0.5)}]
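A standalone usage sketch; types.SimpleNamespace stands in for the paritybench _mock_config helper, which may not be importable outside that harness:

from types import SimpleNamespace
import torch

config = SimpleNamespace(hidden_size=4, num_attention_heads=4,
    attention_probs_dropout_prob=0.5)
layer = BertSelfAttention(config).eval()  # eval() disables dropout
hidden_states = torch.rand(4, 4, 4)
attention_mask = torch.rand(4, 4, 4)  # broadcast onto the (b, heads, q, k) scores
context, scores = layer(hidden_states, attention_mask)
print(context.shape, scores.shape)
# torch.Size([4, 4, 4]) torch.Size([4, 4, 4, 4])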
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_add_div_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused_add_div_1[grid(256)](buf6, primals_8, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_8 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_3[grid(256)](buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf7 buf9 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_7, buf9, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del primals_7 buf10 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_4[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf10 return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0 ), buf6, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), buf8, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0 
), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0) class BertSelfAttentionNew(nn.Module): def __init__(self, config): super(BertSelfAttentionNew, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, input_0, input_1): primals_1 = self.query.weight primals_2 = self.query.bias primals_4 = self.key.weight primals_5 = self.key.bias primals_6 = self.value.weight primals_7 = self.value.bias primals_3 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0], output[1]
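A parity sketch, CUDA assumed: the compiled graph omits dropout entirely, so it should be compared against the eager module in eval() mode. Both forwards return a (context_layer, attention_scores) pair:

from types import SimpleNamespace
import torch

if torch.cuda.is_available():
    cfg = SimpleNamespace(hidden_size=4, num_attention_heads=4,
        attention_probs_dropout_prob=0.5)
    eager = BertSelfAttention(cfg).cuda().eval()
    fused = BertSelfAttentionNew(cfg).cuda()
    fused.load_state_dict(eager.state_dict())
    h = torch.rand(4, 4, 4, device='cuda')
    m = torch.rand(4, 4, 4, device='cuda')
    for a, b in zip(eager(h, m), fused(h, m)):  # context, then scores
        torch.testing.assert_close(a, b, rtol=1e-4, atol=1e-5)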
MingjieWang0606/2021-Sohu-Text-Matching-TOP2
BertSelfAttention
false
18080
[ "MIT" ]
5
830a286cc978cb285cb63ae5a457e1d3813fa68a
https://github.com/MingjieWang0606/2021-Sohu-Text-Matching-TOP2/tree/830a286cc978cb285cb63ae5a457e1d3813fa68a
Color_MNIST_CNN
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/xq/cxqxvwhuevcb5oe7gsgfej3rmce7mdc6vqg3mkviccgugis2c7ro.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (27*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_2/inductor_cache/az/cazvf33aclbntgyixs3zlm6bdzs672xtry2xl6pc3l3sjzwrnks5.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 12 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/xs/cxsarnw2wm2gid2judloczqftyialh3etpmvbejw7tuglcm5m2ir.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/fh/cfhbha4jjott434uisf5mi7hyc6aabnyin37w2nsewlcpyvjmf25.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16384 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = (yindex // 128) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/em/cemohvboy4oonyaecvinzgpaccxh3jyjsidoq3oihbn6wgjrmans.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : 
[num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_4 = async_compile.triton('triton_poi_fused_convolution_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/gm/cgmvzbmjoegavmcrd2a5ilv5aofujjravwbhn72egiaeoruqmcr5.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # x_2 => var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True}) triton_per_fused_native_group_norm_5 = async_compile.triton('triton_per_fused_native_group_norm_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[8192, 128], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_per_fused_native_group_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 5, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_5(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 8192 rnumel = 128 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x0 = xindex % 32 x1 = (xindex // 32) % 64 x2 = (xindex // 2048) x4 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*((r3 + (128*x1)) % 4096)) + (262144*x2) + ((r3 + (128*x1)) // 4096)), None, eviction_policy='evict_last') tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp7 = tl.sum(tmp5, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 128, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.sum(tmp13, 1)[:, None] tl.store(out_ptr0 + (x4), tmp10, None) tl.store(out_ptr1 + (x4), tmp15, None) tl.store(out_ptr2 + (x4), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/se/cse2shh7hxsyaogwxpnsuwf5rdjupauyqltmv6yjwvlvmcs2hhqi.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # x_2 => var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True}) triton_per_fused_native_group_norm_6 = async_compile.triton('triton_per_fused_native_group_norm_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[128, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 3, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 128 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 32 x1 = (xindex // 32) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (32*r2) + (2048*x1)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + (32*r2) + (2048*x1)), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (x0 + (32*r2) + (2048*x1)), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp15 = tmp12[:, None] tl.store(out_ptr0 + (x3), tmp13, xmask) tl.store(out_ptr1 + (x3), tmp14, xmask) tl.store(out_ptr2 + (x3), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/7i/c7id3rlum3qo656y5m7e3xmlczrwfrb3dr6imgeo47tn4zr2zjkf.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # x_2 => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_per_fused_native_group_norm_7 = async_compile.triton('triton_per_fused_native_group_norm_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[32, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 32 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (4*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + (4*x0)), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1 + (4*x0)), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp15 = tmp12[:, None] tmp16 = 32768.0 tmp17 = tmp14 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(out_ptr2 + (x0), tmp20, xmask) tl.store(out_ptr0 + (x0), tmp13, xmask) tl.store(out_ptr1 + (x0), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/it/citpybvb4gozb2c6pt7adce7v277lx3w7taovaeb4xk6twbnwheo.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # x_2 => add_1, mul_1 # Graph fragment: # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {}) triton_poi_fused_native_group_norm_8 = async_compile.triton('triton_poi_fused_native_group_norm_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : 
tl.constexpr): xnumel = 1048576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 64 x2 = (xindex // 262144) tmp0 = tl.load(in_ptr0 + (x3), None) tmp3 = tl.load(in_ptr1 + ((8*x2) + (x0 // 8)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + ((8*x2) + (x0 // 8)), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = 32768.0 tmp7 = tmp5 / tmp6 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp4 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/si/csimcqcgzvpe37zp767u3whmgxl53mhrkyyxo4xgviagwt2m2n2x.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x_3 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%add_1, %primals_6, %primals_7, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_9 = async_compile.triton('triton_poi_fused_convolution_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/mq/cmqa7c3tznjmav6edyxfyriavunfcccmadpqiktubinjsakwjgnf.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # x_5 => var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] 
= call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [2, 3]), kwargs = {correction: 0, keepdim: True}) triton_per_fused_native_group_norm_10 = async_compile.triton('triton_per_fused_native_group_norm_10', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4096, 128], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 5, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_10(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4096 rnumel = 128 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x0 = xindex % 16 x1 = (xindex // 16) % 64 x2 = (xindex // 1024) x4 = xindex tmp0 = tl.load(in_ptr0 + ((8*x0) + (128*((r3 + (128*x1)) % 1024)) + (131072*x2) + ((r3 + (128*x1)) // 1024)), None, eviction_policy='evict_last') tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp7 = tl.sum(tmp5, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 128, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.sum(tmp13, 1)[:, None] tl.store(out_ptr0 + (x4), tmp10, None) tl.store(out_ptr1 + (x4), tmp15, None) tl.store(out_ptr2 + (x4), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/u3/cu3mmpywm3oijijlid2v27jyxdh5yzjzfxn6fm232l4ohehqnps3.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # x_5 => var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [2, 3]), kwargs = {correction: 0, keepdim: True}) triton_per_fused_native_group_norm_11 = async_compile.triton('triton_per_fused_native_group_norm_11', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from 
torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[64, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 3, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_11(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 16 x1 = (xindex // 16) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*r2) + (1024*x1)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + (16*r2) + (1024*x1)), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (x0 + (16*r2) + (1024*x1)), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp15 = tmp12[:, None] tl.store(out_ptr0 + (x3), tmp13, xmask) tl.store(out_ptr1 + (x3), tmp14, xmask) tl.store(out_ptr2 + (x3), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ed/cedfksrnqvpkt7jd35xrx3qu7keplge5lohbhs5xvje6f24gb3we.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # x_5 => add_2, rsqrt_1, var_mean_1 # Graph fragment: # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {}) triton_per_fused_native_group_norm_12 = async_compile.triton('triton_per_fused_native_group_norm_12', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from 
torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[32, 2], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_native_group_norm_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 32 rnumel = 2 RBLOCK: tl.constexpr = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (2*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + (2*x0)), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1 + (2*x0)), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp15 = tmp12[:, None] tmp16 = 16384.0 tmp17 = tmp14 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(out_ptr2 + (x0), tmp20, xmask) tl.store(out_ptr0 + (x0), tmp13, xmask) tl.store(out_ptr1 + (x0), tmp14, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/4j/c4jcuwmyoijsshsdirw7j54z7p375g6lbhcyugbihpcmpz2ud3gz.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.native_group_norm] # Source node to ATen node mapping: # x_5 => add_3, mul_3 # Graph fragment: # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %unsqueeze_11), kwargs = {}) # %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %unsqueeze_8), kwargs = {}) triton_poi_fused_native_group_norm_13 = async_compile.triton('triton_poi_fused_native_group_norm_13', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( 
size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_group_norm_13(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 524288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 128 x2 = (xindex // 131072) tmp0 = tl.load(in_ptr0 + (x3), None) tmp3 = tl.load(in_ptr1 + ((8*x2) + (x0 // 16)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + ((8*x2) + (x0 // 16)), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = 16384.0 tmp7 = tmp5 / tmp6 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp4 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + (x3), tmp15, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/66/c66exsyfbegagfnlmwef7ywlriejqdhkmkt4btgznuisag7vi7gb.py # Topologically Sorted Source Nodes: [x_11, x_12], Original ATen: [aten.native_group_norm, aten.mean] # Source node to ATen node mapping: # x_11 => add_7, mul_7 # x_12 => mean # Graph fragment: # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_7, %unsqueeze_23), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, %unsqueeze_20), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_7, [-1, -2], True), kwargs = {}) triton_red_fused_mean_native_group_norm_14 = async_compile.triton('triton_red_fused_mean_native_group_norm_14', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[4096, 128], reduction_hint=ReductionHint.OUTER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_mean_native_group_norm_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_mean_native_group_norm_14(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 4096 rnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 128 x4 = (xindex // 128) x2 = (xindex // 1024) tmp3 = tl.load(in_ptr1 + ((8*x2) + (x0 // 16)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + ((8*x2) + (x0 // 16)), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last') _tmp17 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x5 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r3 = rindex tmp0 = tl.load(in_ptr0 + (x0 + (128*r3) + (4096*((r3 % 32) // 32)) + (16384*x4)), rmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = 16384.0 tmp7 = tmp5 / tmp6 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp4 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = _tmp17 + tmp16 _tmp17 = tl.where(rmask, tmp18, _tmp17) tmp17 = tl.sum(_tmp17, 1)[:, None] tl.store(out_ptr0 + (x5), tmp17, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/i7/ci72nb3ysquj3dob72gnk2zp6taxi35fdd2zuplhtfa7sjjojugb.py # Topologically Sorted Source Nodes: [x_11, x_12], Original ATen: [aten.native_group_norm, aten.mean] # Source node to ATen node mapping: # x_11 => add_7, mul_7 # x_12 => mean # Graph fragment: # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_7, %unsqueeze_23), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, %unsqueeze_20), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_7, [-1, -2], True), kwargs = {}) triton_per_fused_mean_native_group_norm_15 = async_compile.triton('triton_per_fused_mean_native_group_norm_15', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[512, 8], reduction_hint=ReductionHint.OUTER_TINY, filename=__file__, triton_meta={'signature': {0: 
'*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_native_group_norm_15', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_native_group_norm_15(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 512 rnumel = 8 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 128 x1 = (xindex // 128) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (128*r2) + (1024*x1)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 1024.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17 = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64, ), (1, )) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128, ), (1, )) assert_size_stride(primals_8, (128, ), (1, )) assert_size_stride(primals_9, (128, ), (1, )) assert_size_stride(primals_10, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (128, ), (1, )) assert_size_stride(primals_12, (128, ), (1, )) assert_size_stride(primals_13, (128, ), (1, )) assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (128, ), (1, )) assert_size_stride(primals_16, (128, ), (1, )) assert_size_stride(primals_17, (128, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] stream0 = get_raw_stream(0) triton_poi_fused_0.run(primals_1, buf0, 192, 9, grid=grid(192, 9), stream=stream0) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_1.run(primals_3, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0) del primals_3 buf2 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_2.run(primals_6, buf2, 8192, 9, grid=grid(8192, 9), 
stream=stream0) del primals_6 buf3 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_10, buf3, 16384, 9, grid=grid(16384, 9), stream=stream0) del primals_10 buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) # Unsorted Source Nodes: [], Original ATen: [] triton_poi_fused_3.run(primals_14, buf4, 16384, 9, grid=grid(16384, 9), stream=stream0) del primals_14 # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf6 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] triton_poi_fused_convolution_4.run(buf6, primals_2, 1048576, grid=grid(1048576), stream=stream0) del primals_2 buf7 = empty_strided_cuda((4, 8, 1, 1, 4, 64), (2048, 4, 8192, 8192, 1, 32), torch.float32) buf8 = empty_strided_cuda((4, 8, 1, 1, 4, 64), (2048, 4, 8192, 8192, 1, 32), torch.float32) buf9 = empty_strided_cuda((4, 8, 1, 1, 4, 64), (2048, 4, 8192, 8192, 1, 32), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_5.run(buf6, buf7, buf8, buf9, 8192, 128, grid=grid(8192), stream=stream0) buf10 = empty_strided_cuda((4, 8, 1, 1, 4), (32, 4, 128, 128, 1), torch.float32) buf11 = empty_strided_cuda((4, 8, 1, 1, 4), (32, 4, 128, 128, 1), torch.float32) buf12 = empty_strided_cuda((4, 8, 1, 1, 4), (32, 4, 128, 128, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_6.run(buf7, buf8, buf9, buf10, buf11, buf12, 128, 64, grid=grid(128), stream=stream0) del buf7 del buf8 del buf9 buf13 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf14 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf17 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_7.run(buf10, buf11, buf12, buf13, buf14, buf17, 32, 4, grid=grid(32), stream=stream0) del buf10 del buf11 del buf12 buf16 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_group_norm] triton_poi_fused_native_group_norm_8.run(buf6, buf13, buf14, primals_4, primals_5, buf16, 1048576, grid=grid(1048576), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] buf18 = extern_kernels.convolution(buf16, buf2, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf19 = buf18; del buf18 # reuse # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] triton_poi_fused_convolution_9.run(buf19, primals_7, 524288, grid=grid(524288), stream=stream0) del primals_7 buf20 = empty_strided_cuda((4, 8, 1, 1, 2, 64), (1024, 2, 4096, 4096, 1, 16), torch.float32) buf21 = empty_strided_cuda((4, 8, 1, 1, 2, 64), (1024, 2, 4096, 4096, 1, 16), torch.float32) buf22 = empty_strided_cuda((4, 8, 1, 1, 2, 64), (1024, 2, 4096, 4096, 1, 16), torch.float32) # Topologically Sorted Source Nodes: 
[x_5], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_10.run(buf19, buf20, buf21, buf22, 4096, 128, grid=grid(4096), stream=stream0) buf23 = empty_strided_cuda((4, 8, 1, 1, 2), (16, 2, 64, 64, 1), torch.float32) buf24 = empty_strided_cuda((4, 8, 1, 1, 2), (16, 2, 64, 64, 1), torch.float32) buf25 = empty_strided_cuda((4, 8, 1, 1, 2), (16, 2, 64, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_11.run(buf20, buf21, buf22, buf23, buf24, buf25, 64, 64, grid=grid(64), stream=stream0) buf26 = buf14; del buf14 # reuse buf27 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf30 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_12.run(buf23, buf24, buf25, buf26, buf27, buf30, 32, 2, grid=grid(32), stream=stream0) buf29 = empty_strided_cuda((4, 128, 32, 32), (131072, 1, 4096, 128), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.native_group_norm] triton_poi_fused_native_group_norm_13.run(buf19, buf26, buf27, primals_8, primals_9, buf29, 524288, grid=grid(524288), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution] buf31 = extern_kernels.convolution(buf29, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf31, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf32 = buf31; del buf31 # reuse # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution] triton_poi_fused_convolution_9.run(buf32, primals_11, 524288, grid=grid(524288), stream=stream0) del primals_11 buf33 = buf22; del buf22 # reuse buf34 = buf21; del buf21 # reuse buf35 = buf20; del buf20 # reuse # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_10.run(buf32, buf33, buf34, buf35, 4096, 128, grid=grid(4096), stream=stream0) buf36 = buf25; del buf25 # reuse buf37 = buf24; del buf24 # reuse buf38 = buf23; del buf23 # reuse # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_11.run(buf33, buf34, buf35, buf36, buf37, buf38, 64, 64, grid=grid(64), stream=stream0) buf39 = buf27; del buf27 # reuse buf40 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf43 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_12.run(buf36, buf37, buf38, buf39, buf40, buf43, 32, 2, grid=grid(32), stream=stream0) buf42 = empty_strided_cuda((4, 128, 32, 32), (131072, 1, 4096, 128), torch.float32) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.native_group_norm] triton_poi_fused_native_group_norm_13.run(buf32, buf39, buf40, primals_12, primals_13, buf42, 524288, grid=grid(524288), stream=stream0) del primals_13 # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution] buf44 = extern_kernels.convolution(buf42, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf44, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf45 = buf44; del buf44 # reuse # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution] 
triton_poi_fused_convolution_9.run(buf45, primals_15, 524288, grid=grid(524288), stream=stream0) del primals_15 buf46 = buf35; del buf35 # reuse buf47 = buf34; del buf34 # reuse buf48 = buf33; del buf33 # reuse # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_10.run(buf45, buf46, buf47, buf48, 4096, 128, grid=grid(4096), stream=stream0) buf49 = buf38; del buf38 # reuse buf50 = buf37; del buf37 # reuse buf51 = buf36; del buf36 # reuse # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_11.run(buf46, buf47, buf48, buf49, buf50, buf51, 64, 64, grid=grid(64), stream=stream0) del buf46 del buf47 buf52 = buf40; del buf40 # reuse buf53 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf55 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.native_group_norm] triton_per_fused_native_group_norm_12.run(buf49, buf50, buf51, buf52, buf53, buf55, 32, 2, grid=grid(32), stream=stream0) del buf49 del buf50 del buf51 buf56 = reinterpret_tensor(buf48, (4, 128, 1, 1, 8), (1024, 1, 4096, 4096, 128), 0); del buf48 # reuse # Topologically Sorted Source Nodes: [x_11, x_12], Original ATen: [aten.native_group_norm, aten.mean] triton_red_fused_mean_native_group_norm_14.run(buf45, buf52, buf53, primals_16, primals_17, buf56, 4096, 128, grid=grid(4096), stream=stream0) del buf53 del primals_17 buf57 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 512, 512), torch.float32) buf58 = buf57; del buf57 # reuse # Topologically Sorted Source Nodes: [x_11, x_12], Original ATen: [aten.native_group_norm, aten.mean] triton_per_fused_mean_native_group_norm_15.run(buf58, buf56, 512, 8, grid=grid(512), stream=stream0) del buf56 return (reinterpret_tensor(buf58, (4, 128), (128, 1), 0), buf0, buf1, primals_4, buf2, primals_8, buf3, primals_12, buf4, primals_16, buf6, buf16, reinterpret_tensor(buf13, (4, 8), (8, 1), 0), reinterpret_tensor(buf17, (4, 8), (8, 1), 0), buf19, buf29, reinterpret_tensor(buf26, (4, 8), (8, 1), 0), reinterpret_tensor(buf30, (4, 8), (8, 1), 0), buf32, buf42, reinterpret_tensor(buf39, (4, 8), (8, 1), 0), reinterpret_tensor(buf43, (4, 8), (8, 1), 0), buf45, reinterpret_tensor(buf52, (4, 8), (8, 1), 0), reinterpret_tensor(buf55, (4, 8), (8, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((128, ), (1, ), device='cuda:0', 
dtype=torch.float32) primals_13 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
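For reference, the fused relu + native_group_norm graph fragments implemented by the kernels above correspond to ordinary GroupNorm statistics with 8 groups, eps=1e-05, and correction=0 (biased variance). A minimal eager-mode sketch of that math, written purely for illustration (the function name relu_group_norm is mine, not part of the generated code):

import torch
import torch.nn.functional as F


def relu_group_norm(x, weight, bias, groups=8, eps=1e-05):
    # Mirrors the fused kernels: relu first, then per-(sample, group)
    # mean/variance over (C // groups) * H * W elements, rsqrt(var + eps),
    # and the per-channel affine from the GroupNorm weight/bias.
    n, c, h, w = x.shape
    x = F.relu(x)
    g = x.view(n, groups, -1)
    mean = g.mean(dim=2, keepdim=True)
    var = g.var(dim=2, unbiased=False, keepdim=True)  # correction=0
    g = (g - mean) * torch.rsqrt(var + eps)
    return g.view(n, c, h, w) * weight.view(1, c, 1, 1) + bias.view(1, c, 1, 1)

On random inputs this agrees with F.group_norm(F.relu(x), 8, weight, bias, eps=1e-05) to floating-point tolerance, which makes it a convenient sanity check against the compiled path.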
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data


class Color_MNIST_CNN(nn.Module):

    def __init__(self):
        super(Color_MNIST_CNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, 3, 1, padding=1)
        self.conv2 = nn.Conv2d(64, 128, 3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(128, 128, 3, 1, padding=1)
        self.conv4 = nn.Conv2d(128, 128, 3, 1, padding=1)
        self.bn0 = nn.GroupNorm(8, 64)
        self.bn1 = nn.GroupNorm(8, 128)
        self.bn2 = nn.GroupNorm(8, 128)
        self.bn3 = nn.GroupNorm(8, 128)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(x)
        x = self.bn0(x)
        x = self.conv2(x)
        x = F.relu(x)
        x = self.bn1(x)
        x = self.conv3(x)
        x = F.relu(x)
        x = self.bn2(x)
        x = self.conv4(x)
        x = F.relu(x)
        x = self.bn3(x)
        x = self.avgpool(x)
        x = x.view(len(x), -1)
        return x


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]
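# Hedged usage sketch (added for illustration; only the names above come from
# the original row, the test itself is an assumption): a CPU smoke test
# confirming that the [4, 3, 64, 64] input from get_inputs() flattens to a
# [4, 128] feature vector.
def _smoke_test_color_mnist_cnn():
    model = Color_MNIST_CNN()
    x, = get_inputs()
    feats = model(x)
    # conv1 preserves 64x64, conv2 (stride=2) halves to 32x32, and the
    # adaptive average pool reduces to 1x1, so view(len(x), -1) -> [4, 128].
    assert feats.shape == (4, 128)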
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 12 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = yindex // 3 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) @triton.jit def triton_per_fused_native_group_norm_5(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x0 = xindex % 32 x1 = xindex // 32 % 64 x2 = xindex // 2048 x4 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * ((r3 + 128 * x1) % 4096) + 262144 * x2 + (r3 + 128 * x1) // 4096), None, eviction_policy= 'evict_last') tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp7 = tl.sum(tmp5, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 128, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.sum(tmp13, 1)[:, None] tl.store(out_ptr0 + x4, tmp10, None) tl.store(out_ptr1 + x4, tmp15, None) tl.store(out_ptr2 + x4, tmp9, None) @triton.jit def triton_per_fused_native_group_norm_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 128 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 32 x1 = xindex // 32 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 32 * r2 + 2048 * x1), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + 32 * r2 + 2048 * x1), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (x0 + 32 * r2 + 2048 * x1), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp15 = tmp12[:, None] tl.store(out_ptr0 + x3, tmp13, xmask) tl.store(out_ptr1 + x3, tmp14, xmask) tl.store(out_ptr2 + x3, tmp15, xmask) @triton.jit def triton_per_fused_native_group_norm_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 32 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = 
tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 4 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1 + 4 * x0), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp12[:, None] tmp16 = 32768.0 tmp17 = tmp14 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(out_ptr2 + x0, tmp20, xmask) tl.store(out_ptr0 + x0, tmp13, xmask) tl.store(out_ptr1 + x0, tmp14, xmask) @triton.jit def triton_poi_fused_native_group_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 64 x2 = xindex // 262144 tmp0 = tl.load(in_ptr0 + x3, None) tmp3 = tl.load(in_ptr1 + (8 * x2 + x0 // 8), None, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr2 + (8 * x2 + x0 // 8), None, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = 32768.0 tmp7 = tmp5 / tmp6 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp4 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_poi_fused_convolution_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) @triton.jit def triton_per_fused_native_group_norm_10(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r3 = rindex x0 = xindex % 16 x1 = xindex // 16 % 64 x2 = xindex // 1024 x4 = xindex tmp0 = tl.load(in_ptr0 + (8 * x0 + 128 * ((r3 + 128 * x1) % 1024) + 131072 * x2 + (r3 + 128 * x1) // 1024), None, eviction_policy= 'evict_last') tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp7 = tl.sum(tmp5, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 128, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp3 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.sum(tmp13, 1)[:, None] tl.store(out_ptr0 + x4, tmp10, None) tl.store(out_ptr1 + x4, tmp15, None) tl.store(out_ptr2 + x4, tmp9, None) @triton.jit def triton_per_fused_native_group_norm_11(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] 
tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 16 x1 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 1024 * x1), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + 16 * r2 + 1024 * x1), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (x0 + 16 * r2 + 1024 * x1), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp15 = tmp12[:, None] tl.store(out_ptr0 + x3, tmp13, xmask) tl.store(out_ptr1 + x3, tmp14, xmask) tl.store(out_ptr2 + x3, tmp15, xmask) @triton.jit def triton_per_fused_native_group_norm_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 32 RBLOCK: tl.constexpr = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 2 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 2 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1 + 2 * x0), xmask, other=0.0) tmp3 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp3, 0) tmp8 = tl.where(xmask, tmp4, 0) tmp9 = tl.where(xmask, tmp5, 0) tmp10, tmp11, tmp12 = triton_helpers.welford(tmp7, tmp8, tmp9, 1) tmp13 = tmp10[:, None] tmp14 = tmp11[:, None] tmp12[:, None] tmp16 = 16384.0 tmp17 = tmp14 / tmp16 tmp18 = 1e-05 tmp19 = tmp17 + tmp18 tmp20 = libdevice.rsqrt(tmp19) tl.store(out_ptr2 + x0, tmp20, xmask) tl.store(out_ptr0 + x0, tmp13, xmask) tl.store(out_ptr1 + x0, tmp14, xmask) @triton.jit def triton_poi_fused_native_group_norm_13(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 128 x2 = xindex // 131072 tmp0 = tl.load(in_ptr0 + x3, None) tmp3 = tl.load(in_ptr1 + (8 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr2 + (8 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = 16384.0 tmp7 = tmp5 / tmp6 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp4 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tl.store(out_ptr0 + x3, tmp15, None) @triton.jit def triton_red_fused_mean_native_group_norm_14(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 128 x4 = xindex // 128 x2 = xindex // 1024 tmp3 = tl.load(in_ptr1 + (8 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr2 + (8 * x2 + x0 // 16), None, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr4 + 
x0, None, eviction_policy='evict_last') _tmp17 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x5 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r3 = rindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * r3 + 4096 * (r3 % 32 // 32) + 16384 * x4), rmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = 16384.0 tmp7 = tmp5 / tmp6 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.rsqrt(tmp9) tmp11 = tmp4 * tmp10 tmp13 = tmp11 * tmp12 tmp15 = tmp13 + tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = _tmp17 + tmp16 _tmp17 = tl.where(rmask, tmp18, _tmp17) tmp17 = tl.sum(_tmp17, 1)[:, None] tl.store(out_ptr0 + x5, tmp17, None) @triton.jit def triton_per_fused_mean_native_group_norm_15(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 512 RBLOCK: tl.constexpr = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 128 x1 = xindex // 128 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * r2 + 1024 * x1), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 1024.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x3, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17) = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (64,), (1,)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (128,), (1,)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (128,), (1,)) assert_size_stride(primals_13, (128,), (1,)) assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (128,), (1,)) assert_size_stride(primals_16, (128,), (1,)) assert_size_stride(primals_17, (128,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(192, 9)](primals_1, buf0, 192, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_2[grid(8192, 9)](primals_6, buf2, 8192, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf3 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_3[grid(16384, 9)](primals_10, buf3, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_3[grid(16384, 9)](primals_14, 
buf4, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_14 buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf6 = buf5 del buf5 triton_poi_fused_convolution_4[grid(1048576)](buf6, primals_2, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf7 = empty_strided_cuda((4, 8, 1, 1, 4, 64), (2048, 4, 8192, 8192, 1, 32), torch.float32) buf8 = empty_strided_cuda((4, 8, 1, 1, 4, 64), (2048, 4, 8192, 8192, 1, 32), torch.float32) buf9 = empty_strided_cuda((4, 8, 1, 1, 4, 64), (2048, 4, 8192, 8192, 1, 32), torch.float32) triton_per_fused_native_group_norm_5[grid(8192)](buf6, buf7, buf8, buf9, 8192, 128, XBLOCK=8, num_warps=8, num_stages=1) buf10 = empty_strided_cuda((4, 8, 1, 1, 4), (32, 4, 128, 128, 1), torch.float32) buf11 = empty_strided_cuda((4, 8, 1, 1, 4), (32, 4, 128, 128, 1), torch.float32) buf12 = empty_strided_cuda((4, 8, 1, 1, 4), (32, 4, 128, 128, 1), torch.float32) triton_per_fused_native_group_norm_6[grid(128)](buf7, buf8, buf9, buf10, buf11, buf12, 128, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf7 del buf8 del buf9 buf13 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf14 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf17 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) triton_per_fused_native_group_norm_7[grid(32)](buf10, buf11, buf12, buf13, buf14, buf17, 32, 4, XBLOCK=1, num_warps=2, num_stages=1) del buf10 del buf11 del buf12 buf16 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.float32) triton_poi_fused_native_group_norm_8[grid(1048576)](buf6, buf13, buf14, primals_4, primals_5, buf16, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf18 = extern_kernels.convolution(buf16, buf2, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf19 = buf18 del buf18 triton_poi_fused_convolution_9[grid(524288)](buf19, primals_7, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf20 = empty_strided_cuda((4, 8, 1, 1, 2, 64), (1024, 2, 4096, 4096, 1, 16), torch.float32) buf21 = empty_strided_cuda((4, 8, 1, 1, 2, 64), (1024, 2, 4096, 4096, 1, 16), torch.float32) buf22 = empty_strided_cuda((4, 8, 1, 1, 2, 64), (1024, 2, 4096, 4096, 1, 16), torch.float32) triton_per_fused_native_group_norm_10[grid(4096)](buf19, buf20, buf21, buf22, 4096, 128, XBLOCK=8, num_warps=8, num_stages=1) buf23 = empty_strided_cuda((4, 8, 1, 1, 2), (16, 2, 64, 64, 1), torch.float32) buf24 = empty_strided_cuda((4, 8, 1, 1, 2), (16, 2, 64, 64, 1), torch.float32) buf25 = empty_strided_cuda((4, 8, 1, 1, 2), (16, 2, 64, 64, 1), torch.float32) triton_per_fused_native_group_norm_11[grid(64)](buf20, buf21, buf22, buf23, buf24, buf25, 64, 64, XBLOCK=1, num_warps=2, num_stages=1) buf26 = buf14 del buf14 buf27 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf30 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) triton_per_fused_native_group_norm_12[grid(32)](buf23, buf24, buf25, buf26, buf27, buf30, 32, 2, XBLOCK=1, num_warps=2, num_stages=1) buf29 = empty_strided_cuda((4, 128, 32, 32), (131072, 1, 4096, 128), torch.float32) triton_poi_fused_native_group_norm_13[grid(524288)](buf19, buf26, buf27, primals_8, primals_9, buf29, 524288, XBLOCK=512, num_warps=8, 
num_stages=1) del primals_9 buf31 = extern_kernels.convolution(buf29, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf31, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf32 = buf31 del buf31 triton_poi_fused_convolution_9[grid(524288)](buf32, primals_11, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf33 = buf22 del buf22 buf34 = buf21 del buf21 buf35 = buf20 del buf20 triton_per_fused_native_group_norm_10[grid(4096)](buf32, buf33, buf34, buf35, 4096, 128, XBLOCK=8, num_warps=8, num_stages=1) buf36 = buf25 del buf25 buf37 = buf24 del buf24 buf38 = buf23 del buf23 triton_per_fused_native_group_norm_11[grid(64)](buf33, buf34, buf35, buf36, buf37, buf38, 64, 64, XBLOCK=1, num_warps=2, num_stages=1) buf39 = buf27 del buf27 buf40 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf43 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) triton_per_fused_native_group_norm_12[grid(32)](buf36, buf37, buf38, buf39, buf40, buf43, 32, 2, XBLOCK=1, num_warps=2, num_stages=1) buf42 = empty_strided_cuda((4, 128, 32, 32), (131072, 1, 4096, 128), torch.float32) triton_poi_fused_native_group_norm_13[grid(524288)](buf32, buf39, buf40, primals_12, primals_13, buf42, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_13 buf44 = extern_kernels.convolution(buf42, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf44, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf45 = buf44 del buf44 triton_poi_fused_convolution_9[grid(524288)](buf45, primals_15, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_15 buf46 = buf35 del buf35 buf47 = buf34 del buf34 buf48 = buf33 del buf33 triton_per_fused_native_group_norm_10[grid(4096)](buf45, buf46, buf47, buf48, 4096, 128, XBLOCK=8, num_warps=8, num_stages=1) buf49 = buf38 del buf38 buf50 = buf37 del buf37 buf51 = buf36 del buf36 triton_per_fused_native_group_norm_11[grid(64)](buf46, buf47, buf48, buf49, buf50, buf51, 64, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf46 del buf47 buf52 = buf40 del buf40 buf53 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) buf55 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 32, 32), torch.float32) triton_per_fused_native_group_norm_12[grid(32)](buf49, buf50, buf51, buf52, buf53, buf55, 32, 2, XBLOCK=1, num_warps=2, num_stages=1) del buf49 del buf50 del buf51 buf56 = reinterpret_tensor(buf48, (4, 128, 1, 1, 8), (1024, 1, 4096, 4096, 128), 0) del buf48 triton_red_fused_mean_native_group_norm_14[grid(4096)](buf45, buf52, buf53, primals_16, primals_17, buf56, 4096, 128, XBLOCK=64, RBLOCK=8, num_warps=4, num_stages=1) del buf53 del primals_17 buf57 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 512, 512), torch.float32) buf58 = buf57 del buf57 triton_per_fused_mean_native_group_norm_15[grid(512)](buf58, buf56, 512, 8, XBLOCK=64, num_warps=4, num_stages=1) del buf56 return (reinterpret_tensor(buf58, (4, 128), (128, 1), 0), buf0, buf1, primals_4, buf2, primals_8, buf3, primals_12, buf4, primals_16, buf6, buf16, reinterpret_tensor(buf13, (4, 8), (8, 1), 0), reinterpret_tensor(buf17, (4, 8), (8, 1), 0), buf19, buf29, reinterpret_tensor(buf26, (4, 8), (8, 1), 0), reinterpret_tensor( buf30, (4, 8), (8, 1), 0), buf32, buf42, reinterpret_tensor(buf39, (4, 8), (8, 1), 0), reinterpret_tensor(buf43, (4, 8), (8, 1), 0), buf45, reinterpret_tensor(buf52, (4, 8), (8, 1), 0), reinterpret_tensor(buf55, (4, 8), (8, 1), 
0))


class Color_MNIST_CNNNew(nn.Module):

    def __init__(self):
        super(Color_MNIST_CNNNew, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, 3, 1, padding=1)
        self.conv2 = nn.Conv2d(64, 128, 3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(128, 128, 3, 1, padding=1)
        self.conv4 = nn.Conv2d(128, 128, 3, 1, padding=1)
        self.bn0 = nn.GroupNorm(8, 64)
        self.bn1 = nn.GroupNorm(8, 128)
        self.bn2 = nn.GroupNorm(8, 128)
        self.bn3 = nn.GroupNorm(8, 128)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

    def forward(self, input_0):
        # Primals are mapped to match their use inside call(): primals_8/9
        # are the bn1 affine parameters, primals_11 is the conv3 bias,
        # primals_12/13 are the bn2 affine parameters, and primals_15 is the
        # conv4 bias (the originally generated mapping crossed these over,
        # which ran without shape errors but produced wrong numerics).
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.bn0.weight
        primals_5 = self.bn0.bias
        primals_6 = self.conv2.weight
        primals_7 = self.conv2.bias
        primals_8 = self.bn1.weight
        primals_9 = self.bn1.bias
        primals_10 = self.conv3.weight
        primals_11 = self.conv3.bias
        primals_12 = self.bn2.weight
        primals_13 = self.bn2.bias
        primals_14 = self.conv4.weight
        primals_15 = self.conv4.bias
        primals_16 = self.bn3.weight
        primals_17 = self.bn3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17])
        return output[0]
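# Hedged equivalence sketch (an assumption, not part of the generated file):
# with the primals mapping above aligned to call(), the compiled wrapper
# should match the eager module once both share parameters. Requires CUDA,
# since call() pins every buffer to cuda:0.
def _compare_eager_vs_compiled_cnn():
    eager = Color_MNIST_CNN().cuda()
    compiled = Color_MNIST_CNNNew().cuda()
    compiled.load_state_dict(eager.state_dict())  # identical weights/biases
    x = torch.rand([4, 3, 64, 64], device='cuda')
    torch.testing.assert_close(eager(x), compiled(x), rtol=1e-4, atol=1e-4)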
VinAIResearch/mDSDI
Color_MNIST_CNN
false
18081
[ "Apache-2.0" ]
9
8ec49085d8389ab490ec633c3ae4bf66be085366
https://github.com/VinAIResearch/mDSDI/tree/8ec49085d8389ab490ec633c3ae4bf66be085366
AngularMarginLoss
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/54/c54anezm5tp7djzvtp7bcl42ylzihy5446xuvla3ymvra5m7c4ma.py # Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div] # Source node to ATen node mapping: # normalize => div # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {}) triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), 
xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/cr/ccrdogiv2orugt6w3w3bztb54eh53lfe3fppq7oorjzy5eeahkts.py # Topologically Sorted Source Nodes: [inputs_norms], Original ATen: [aten.linalg_vector_norm] # Source node to ATen node mapping: # inputs_norms => pow_3, pow_4, sum_2 # Graph fragment: # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1]), kwargs = {}) # %pow_4 : [num_users=3] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {}) triton_poi_fused_linalg_vector_norm_1 = async_compile.triton('triton_poi_fused_linalg_vector_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_linalg_vector_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_linalg_vector_norm_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tl.store(out_ptr0 + (x0), tmp11, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/u5/cu5zlnrhpnawrbhogcvzw7oz3p7z2cmiyks7ehdogesmyv7t6g3m.py # Topologically Sorted Source Nodes: [repeat, normalized_inputs], Original ATen: [aten.repeat, aten.div] # Source node to ATen node 
mapping: # normalized_inputs => div_1 # repeat => repeat # Graph fragment: # %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%unsqueeze, [1, 4]), kwargs = {}) # %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %repeat), kwargs = {}) triton_poi_fused_div_repeat_2 = async_compile.triton('triton_poi_fused_div_repeat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_repeat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_repeat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 / tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/nm/cnmmnrzhfy2haha4morripjjr4a5ubr2k7c3uz4okykzk7z2prjg.py # Topologically Sorted Source Nodes: [cosines], Original ATen: [aten.clamp, aten.ge, aten.le, aten.logical_and] # Source node to ATen node mapping: # cosines => clamp_max, clamp_min_1 # Graph fragment: # %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mm, -1), kwargs = {}) # %clamp_max : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 1), kwargs = {}) # %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%mm, -1), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%mm, 1), kwargs = {}) # %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%ge, %le), kwargs = {}) triton_poi_fused_clamp_ge_le_logical_and_3 = async_compile.triton('triton_poi_fused_clamp_ge_le_logical_and_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, 
TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_ge_le_logical_and_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_ge_le_logical_and_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = -1.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 1.0 tmp4 = triton_helpers.minimum(tmp2, tmp3) tmp5 = tmp0 >= tmp1 tmp6 = tmp0 <= tmp3 tmp7 = tmp5 & tmp6 tl.store(out_ptr0 + (x0), tmp4, xmask) tl.store(out_ptr1 + (x0), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/dg/cdg6pnfyxx7mz7inxwzv7troucbex2skkoiouefdbgg6wuas66tl.py # Topologically Sorted Source Nodes: [preds], Original ATen: [aten.argmax] # Source node to ATen node mapping: # preds => argmax # Graph fragment: # %argmax : [num_users=1] = call_function[target=torch.ops.aten.argmax.default](args = (%clamp_max, 1), kwargs = {}) triton_poi_fused_argmax_4 = async_compile.triton('triton_poi_fused_argmax_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_argmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_argmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : 
tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tmp45 = tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tl.store(out_ptr0 + (x0), tmp46, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/rg/crgithcqziqbitt4ee2pur3cbpw3cnqjsmnf27tha7alxoax3wkm.py # Topologically Sorted Source Nodes: [getitem], Original ATen: [aten.index] # Source node to ATen node mapping: # getitem => index # Graph fragment: # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%permute_1, [%primals_3]), kwargs = {}) triton_poi_fused_index_5 = async_compile.triton('triton_poi_fused_index_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_index_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): 
xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4") tmp7 = tl.load(in_ptr2 + (tmp4 + (4*x0)), xmask, eviction_policy='evict_last') tmp8 = libdevice.acos(tmp7) tmp9 = 1.0 tmp10 = tmp8 * tmp9 tmp11 = 0.0 tmp12 = tmp10 + tmp11 tmp13 = tl_math.cos(tmp12) tmp14 = tmp13 - tmp11 tmp15 = tmp6 * tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [normalize], Original ATen: [aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0) buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [inputs_norms], Original ATen: [aten.linalg_vector_norm] triton_poi_fused_linalg_vector_norm_1.run(primals_2, buf1, 4, grid=grid(4), stream=stream0) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [repeat, normalized_inputs], Original ATen: [aten.repeat, aten.div] triton_poi_fused_div_repeat_2.run(primals_2, buf1, buf2, 16, grid=grid(16), stream=stream0) del primals_2 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(buf2, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.bool) # Topologically Sorted Source Nodes: [cosines], Original ATen: [aten.clamp, aten.ge, aten.le, aten.logical_and] triton_poi_fused_clamp_ge_le_logical_and_3.run(buf3, buf4, buf7, 16, grid=grid(16), stream=stream0) buf5 = empty_strided_cuda((4, ), (1, ), torch.int64) # Topologically Sorted Source Nodes: [preds], Original ATen: [aten.argmax] triton_poi_fused_argmax_4.run(buf4, buf5, 4, grid=grid(4), stream=stream0) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [getitem], Original ATen: [aten.index] triton_poi_fused_index_5.run(primals_3, buf1, buf4, buf6, 16, grid=grid(16), stream=stream0) # Topologically Sorted Source Nodes: [], Original ATen: [] buf8 = torch.ops.aten.set_.source_Tensor(primals_1, buf0) assert_size_stride(buf8, (4, 4), (4, 1)) del buf3 del primals_1 return (reinterpret_tensor(buf6, (4, ), (5, ), 0), buf5, buf4, buf1, buf2, primals_3, buf1, buf2, buf4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64) fn = lambda: call([primals_1, primals_2, primals_3]) 
return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.nn as nn


class MetricLearningLoss(nn.Module):
    """
    Generic loss function to be used in a metric learning setting
    """

    def __init__(self, embedding_size, n_classes, device='cpu', *args,
            **kwargs):
        super(MetricLearningLoss, self).__init__()
        self.embedding_size = embedding_size
        self.n_classes = n_classes
        self.device = device

    def forward(self, inputs, targets):
        raise NotImplementedError()


class AngularMarginLoss(MetricLearningLoss):
    """
    Generic angular margin loss definition
    (see https://github.com/cvqluu/Angular-Penalty-Softmax-Losses-Pytorch)

    "ElasticFace: Elastic Margin Loss for Deep Face Recognition",
    Boutros et al., https://arxiv.org/abs/2109.09416v2
    """

    def __init__(self, embedding_size, n_classes, device='cpu', scale=None,
            m1=1, m2=0, m3=0, eps=1e-06):
        super(AngularMarginLoss, self).__init__(embedding_size, n_classes,
            device=device)
        self.fc = nn.Linear(embedding_size, n_classes, bias=False)
        self.scale = scale
        self.m1 = m1
        self.m2 = m2
        self.m3 = m3
        self.eps = eps

    def forward(self, inputs, targets):
        """
        Compute ArcFace loss for inputs of shape [B, E] and targets of
        size [B]

        B: batch size
        E: embedding size
        """
        self.fc.weight.data = F.normalize(self.fc.weight.data, p=2, dim=1)
        inputs_norms = torch.norm(inputs, p=2, dim=1)
        normalized_inputs = inputs / inputs_norms.unsqueeze(-1).repeat(1,
            inputs.size(1))
        scales = torch.tensor([self.scale], device=inputs.device).repeat(
            inputs.size(0)) if self.scale is not None else inputs_norms
        cosines = self.fc(normalized_inputs).clamp(-1, 1)
        preds = torch.argmax(cosines, dim=1)
        angles = torch.arccos(cosines)
        numerator = scales.unsqueeze(-1) * (torch.cos(self.m1 * angles +
            self.m2) - self.m3)
        numerator = torch.diagonal(numerator.transpose(0, 1)[targets])
        excluded = torch.cat([(scales[i] * torch.cat((cosines[i, :y],
            cosines[i, y + 1:])).unsqueeze(0)) for i, y in enumerate(
            targets)], dim=0)
        denominator = torch.exp(numerator) + torch.sum(torch.exp(excluded),
            dim=1)
        loss = -torch.mean(numerator - torch.log(denominator + self.eps))
        return normalized_inputs, preds, loss


def get_inputs():
    return [torch.rand([4, 4]), torch.ones([4], dtype=torch.int64)]


def get_init_inputs():
    return [[], {'embedding_size': 4, 'n_classes': 4}]
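# Hedged usage sketch (illustrative only; shapes follow get_inputs() /
# get_init_inputs() above, the scale value is an assumption): with the
# default margins m1=1, m2=0, m3=0 the loss reduces to a softmax
# cross-entropy over the scaled cosine logits, up to the eps term.
def _angular_margin_example():
    criterion = AngularMarginLoss(embedding_size=4, n_classes=4, scale=30.0)
    inputs, targets = get_inputs()
    normalized, preds, loss = criterion(inputs, targets)
    assert normalized.shape == (4, 4) and preds.shape == (4,)
    loss.backward()  # gradients flow back into criterion.fc.weight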
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_linalg_vector_norm_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tl.store(out_ptr0 + x0, tmp11, xmask) @triton.jit def triton_poi_fused_div_repeat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 / tmp1 tl.store(out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_clamp_ge_le_logical_and_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = -1.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 1.0 tmp4 = triton_helpers.minimum(tmp2, tmp3) tmp5 = tmp0 >= tmp1 tmp6 = tmp0 <= tmp3 tmp7 = tmp5 & tmp6 tl.store(out_ptr0 + x0, tmp4, xmask) tl.store(out_ptr1 + x0, tmp7, xmask) @triton.jit def triton_poi_fused_argmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp32 = 
tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tl.store(out_ptr0 + x0, tmp46, xmask) @triton.jit def triton_poi_fused_index_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask, 'index out of bounds: 0 <= tmp4 < 4') tmp7 = tl.load(in_ptr2 + (tmp4 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp8 = libdevice.acos(tmp7) tmp9 = 1.0 tmp10 = tmp8 * tmp9 tmp11 = 0.0 tmp12 = tmp10 + tmp11 tmp13 = tl_math.cos(tmp12) tmp14 = tmp13 - tmp11 tmp15 = tmp6 * tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_linalg_vector_norm_1[grid(4)](primals_2, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_repeat_2[grid(16)](primals_2, buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_clamp_ge_le_logical_and_3[grid(16)](buf3, buf4, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4,), (1,), torch.int64) triton_poi_fused_argmax_4[grid(4)](buf4, buf5, 4, XBLOCK=4, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_index_5[grid(16)](primals_3, buf1, buf4, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf8 = torch.ops.aten.set_.source_Tensor(primals_1, buf0) assert_size_stride(buf8, 
(4, 4), (4, 1)) del buf3 del primals_1 return reinterpret_tensor(buf6, (4,), (5,), 0 ), buf5, buf4, buf1, buf2, primals_3, buf1, buf2, buf4, buf7 class MetricLearningLoss(nn.Module): """ Generic loss function to be used in a metric learning setting """ def __init__(self, embedding_size, n_classes, device='cpu', *args, **kwargs ): super(MetricLearningLoss, self).__init__() self.embedding_size = embedding_size self.n_classes = n_classes self.device = device def forward(self, inputs, targets): raise NotImplementedError() class AngularMarginLossNew(MetricLearningLoss): """ Generic angular margin loss definition (see https://github.com/cvqluu/Angular-Penalty-Softmax-Losses-Pytorch) "ElasticFace: Elastic Margin Loss for Deep Face Recognition", Boutros et al., https://arxiv.org/abs/2109.09416v2 """ def __init__(self, embedding_size, n_classes, device='cpu', scale=None, m1=1, m2=0, m3=0, eps=1e-06): super(AngularMarginLossNew, self).__init__(embedding_size, n_classes, device=device) self.fc = nn.Linear(embedding_size, n_classes, bias=False) self.scale = scale self.m1 = m1 self.m2 = m2 self.m3 = m3 self.eps = eps def forward(self, input_0, input_1): primals_1 = self.fc.weight primals_2 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3]) return output[0], output[1], output[2]
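# Hedged note on the compiled wrapper above (the description reflects the
# generated call() as written; the check itself is an assumption):
# torch.ops.aten.set_.source_Tensor(primals_1, buf0) writes the row-normalized
# weight back into fc.weight, mirroring the eager in-place update
# `self.fc.weight.data = F.normalize(...)`. Unlike the eager class, this
# forward-only graph returns the per-target numerator diagonal, the argmax
# predictions, and the clamped cosines rather than the final scalar loss.
def _check_weight_renormalization():
    m = AngularMarginLossNew(embedding_size=4, n_classes=4).cuda()
    x = torch.rand([4, 4], device='cuda')
    y = torch.ones([4], dtype=torch.int64, device='cuda')
    with torch.no_grad():  # set_ on a leaf parameter is safe under no_grad
        m(x, y)  # triggers triton_poi_fused_div_0 + aten.set_ on the weight
    row_norms = m.fc.weight.norm(dim=1)
    torch.testing.assert_close(row_norms, torch.ones_like(row_norms))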
Wadaboa/titanet
AngularMarginLoss
false
18,082
[ "MIT" ]
4
b07e3074e79ea8c1129fb0adb8315e06bb4943ea
https://github.com/Wadaboa/titanet/tree/b07e3074e79ea8c1129fb0adb8315e06bb4943ea
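The fused index kernel above reads off the ElasticFace-style combined margin: it clamps the cosine logits, takes acos, forms cos(m1 * theta + m2) - m3, and rescales. Below is a minimal eager sketch of that formula, not the repo's exact computation: the helper name angular_margin_logits and the fixed scale of 30.0 are assumptions (when scale is None, the compiled graph above rescales by the per-sample embedding norm instead); m1=1, m2=0, m3=0 and eps=1e-06 follow the class defaults.

import torch
import torch.nn.functional as F

def angular_margin_logits(embeddings, weight, labels, scale=30.0,
                          m1=1.0, m2=0.0, m3=0.0, eps=1e-06):
    # Hypothetical helper, a sketch only. Cosine similarity between
    # L2-normalized embeddings and class weights, mirroring the div/norm
    # kernels plus the mm in the compiled call().
    cos_theta = F.normalize(embeddings, dim=1) @ F.normalize(weight, dim=1).t()
    # Clamp so acos stays finite at +/-1 (the clamp_ge_le kernel above).
    cos_theta = cos_theta.clamp(-1.0 + eps, 1.0 - eps)
    theta = torch.acos(cos_theta)
    # Combined margin cos(m1 * theta + m2) - m3, applied to the target class.
    penalized = torch.cos(m1 * theta + m2) - m3
    one_hot = F.one_hot(labels, num_classes=weight.size(0)).bool()
    return scale * torch.where(one_hot, penalized, cos_theta)

emb = torch.randn(8, 16)
w = torch.randn(5, 16)                  # one row per class, like fc.weight
y = torch.randint(0, 5, (8,))
loss = F.cross_entropy(angular_margin_logits(emb, w, y), y)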
GatedTanh
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/zp/czpzm6lunwkpc7ohimppjszibgd7tfa7a3noumwzcrobpnuxt4fg.py # Topologically Sorted Source Nodes: [y_tilda, gated, y], Original ATen: [aten.tanh, aten.sigmoid, aten.mul] # Source node to ATen node mapping: # gated => sigmoid # y => mul # y_tilda => tanh # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, %sigmoid), kwargs = {}) triton_poi_fused_mul_sigmoid_tanh_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sigmoid_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp1 = libdevice.tanh(tmp0) tmp3 = tl.sigmoid(tmp2) tmp4 = tmp1 * tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [y_tilda, gated, y], Original ATen: [aten.tanh, aten.sigmoid, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_sigmoid_tanh_0.run(buf0, buf1, buf2, 256, grid=grid(256), stream=stream0) return (buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class GatedTanh(nn.Module): """ From: https://arxiv.org/pdf/1707.07998.pdf nonlinear_layer (f_a) : x\\in R^m => y \\in R^n \\tilde{y} = tanh(Wx + b) g = sigmoid(W'x + b') y = \\tilde{y} \\circ g input: (N, *, in_dim) output: (N, *, out_dim) """ def __init__(self, in_dim, out_dim): super(GatedTanh, self).__init__() self.fc = nn.Linear(in_dim, out_dim) self.gate_fc = nn.Linear(in_dim, out_dim) def forward(self, x): y_tilda = torch.tanh(self.fc(x)) gated = torch.sigmoid(self.gate_fc(x)) y = y_tilda * gated return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sigmoid_tanh_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp3 = tl.sigmoid(tmp2) tmp4 = tmp1 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_tanh_0[grid(256)](buf0, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf1 class GatedTanhNew(nn.Module): """ From: https://arxiv.org/pdf/1707.07998.pdf nonlinear_layer (f_a) : x\\in R^m => y \\in R^n \\tilde{y} = tanh(Wx + b) g = sigmoid(W'x + b') y = \\tilde{y} \\circ g input: (N, *, in_dim) output: (N, *, out_dim) """ def __init__(self, in_dim, out_dim): super(GatedTanhNew, self).__init__() self.fc = nn.Linear(in_dim, out_dim) self.gate_fc = nn.Linear(in_dim, out_dim) def forward(self, input_0): primals_1 = self.fc.weight primals_2 = self.fc.bias primals_4 = self.gate_fc.weight primals_5 = self.gate_fc.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
VisualJoyce/ChengyuBERT
GatedTanh
false
18,083
[ "MIT" ]
8
605db3a4b3241dd4d02baa41a68bf23b5b00b36d
https://github.com/VisualJoyce/ChengyuBERT/tree/605db3a4b3241dd4d02baa41a68bf23b5b00b36d
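Both versions of GatedTanh compute the same elementwise gate tanh(Wx + b) * sigmoid(W'x + b'); the compiled variant only changes the schedule (two addmm calls, then one fused tanh/sigmoid/mul kernel). A self-contained eager sketch with the shapes from get_inputs(), just to make the formula concrete:

import torch
import torch.nn as nn

torch.manual_seed(0)
fc = nn.Linear(4, 4)
gate_fc = nn.Linear(4, 4)
x = torch.rand(4, 4, 4, 4)                # shape from get_inputs()

# The two addmm calls in call() produce fc(x) and gate_fc(x); the Triton
# kernel then fuses tanh, sigmoid and the product into one elementwise pass.
y = torch.tanh(fc(x)) * torch.sigmoid(gate_fc(x))
assert y.shape == x.shape                 # (N, *, out_dim) with in_dim == out_dim

With in_dim == out_dim == 4 the output keeps the input shape, which is why the fused kernel can run over a flat 256-element index space.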
CELoss
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/sq/csqxcx7bcw6fvapjw434bjqekti5qvrwgs4wowos4rx5yv2tvct5.py # Topologically Sorted Source Nodes: [preds], Original ATen: [aten.argmax] # Source node to ATen node mapping: # preds => argmax # Graph fragment: # %argmax : [num_users=1] = call_function[target=torch.ops.aten.argmax.default](args = (%view_1, 1), kwargs = {}) triton_poi_fused_argmax_0 = async_compile.triton('triton_poi_fused_argmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_argmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_argmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp17 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp32 = tl.load(in_ptr0 + (48 + x0 + 
(64*x1)), xmask) tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tmp45 = tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tl.store(out_ptr0 + (x2), tmp46, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/im/cimjqfyrnbi2v4eg3hbj3b2rzjznianhv7isimhj2shrdgg2krgd.py # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # loss => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, 
eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/hj/chjkf3zkduqkxxqa2hn3c3ceomf2ubihjigdg6qczuxbl3o263iq.py # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div] # Source node to ATen node mapping: # loss => div, exp, log, mul, neg, sub_1, sum_1, sum_2 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %primals_4), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%neg, 64), kwargs = {}) triton_per_fused__log_softmax_div_mul_neg_sum_2 = async_compile.triton('triton_per_fused__log_softmax_div_mul_neg_sum_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_mul_neg_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_sum_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 
rmask = tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = (rindex // 64) tmp0 = tl.load(in_ptr0 + (r3), None) tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (r3), None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tmp20 = 0.015625 tmp21 = tmp19 * tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/qd/cqdef2s5voa43gmy5v24fadwe65ywcqhl5puvcwnmpib2dsbqw4q.py # Topologically Sorted Source Nodes: [inputs], Original ATen: [aten.div] # Source node to ATen node mapping: # inputs => div_1 # Graph fragment: # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_3, %expand), kwargs = {}) triton_poi_fused_div_3 = async_compile.triton('triton_poi_fused_div_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + 
tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x3), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) # Topologically Sorted Source Nodes: [preds], Original ATen: [aten.argmax] stream0 = get_raw_stream(0) triton_poi_fused_argmax_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_1.run(buf0, buf2, 256, grid=grid(256), stream=stream0) buf3 = empty_strided_cuda((), (), torch.float32) buf5 = buf3; del buf3 # reuse # Topologically Sorted Source Nodes: [loss], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div] triton_per_fused__log_softmax_div_mul_neg_sum_2.run(buf5, buf2, primals_4, 1, 256, grid=grid(1), stream=stream0) buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [inputs], Original ATen: [aten.div] triton_poi_fused_div_3.run(primals_3, buf4, 256, grid=grid(256), stream=stream0) return (buf4, buf1, buf5, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.nn as nn class MetricLearningLoss(nn.Module): """ Generic loss function to be used in a metric learning setting """ def __init__(self, embedding_size, n_classes, device='cpu', *args, **kwargs ): super(MetricLearningLoss, self).__init__() self.embedding_size = embedding_size self.n_classes = n_classes self.device = device def forward(self, inputs, targets): raise NotImplementedError() class CELoss(MetricLearningLoss): """ Cross-entropy loss with the addition of a linear layer to map inputs to the target number of classes """ def __init__(self, embedding_size, n_classes, device='cpu'): super(CELoss, self).__init__(embedding_size, n_classes, device=device) self.fc = nn.Linear(embedding_size, n_classes) def forward(self, inputs, targets): """ Compute cross-entropy loss for inputs of shape [B, E] and targets of size [B] B: batch size E: embedding size """ logits = self.fc(inputs) preds = torch.argmax(logits, dim=1) loss = F.cross_entropy(logits, targets) inputs = F.normalize(inputs, p=2, dim=1) return inputs, preds, loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'embedding_size': 4, 'n_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_argmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp17 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp32 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tl.store(out_ptr0 + x2, tmp46, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_div_mul_neg_sum_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + r3, None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tmp20 = 0.015625 tmp21 = tmp19 * tmp20 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None) @triton.jit def triton_poi_fused_div_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) get_raw_stream(0) 
triton_poi_fused_argmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax_1[grid(256)](buf0, buf2, 256, XBLOCK= 128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((), (), torch.float32) buf5 = buf3 del buf3 triton_per_fused__log_softmax_div_mul_neg_sum_2[grid(1)](buf5, buf2, primals_4, 1, 256, num_warps=2, num_stages=1) buf4 = buf2 del buf2 triton_poi_fused_div_3[grid(256)](primals_3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf4, buf1, buf5, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0 class MetricLearningLoss(nn.Module): """ Generic loss function to be used in a metric learning setting """ def __init__(self, embedding_size, n_classes, device='cpu', *args, **kwargs ): super(MetricLearningLoss, self).__init__() self.embedding_size = embedding_size self.n_classes = n_classes self.device = device def forward(self, inputs, targets): raise NotImplementedError() class CELossNew(MetricLearningLoss): """ Cross-entropy loss with the addition of a linear layer to map inputs to the target number of classes """ def __init__(self, embedding_size, n_classes, device='cpu'): super(CELossNew, self).__init__(embedding_size, n_classes, device= device) self.fc = nn.Linear(embedding_size, n_classes) def forward(self, input_0, input_1): primals_1 = self.fc.weight primals_2 = self.fc.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0], output[1], output[2]
Wadaboa/titanet
CELoss
false
18,084
[ "MIT" ]
4
b07e3074e79ea8c1129fb0adb8315e06bb4943ea
https://github.com/Wadaboa/titanet/tree/b07e3074e79ea8c1129fb0adb8315e06bb4943ea
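The compiled graph returns the same triple as the eager forward: L2-normalized inputs, argmax predictions, and the mean cross-entropy (the 0.015625 constant in the reduction kernel is 1/64, the flattened batch size). A minimal sketch with the docstring's intended [B, E] inputs and [B] integer targets; B = 64 is an assumption chosen to match that constant:

import torch
import torch.nn.functional as F

B, E, C = 64, 4, 4
fc = torch.nn.Linear(E, C)
inputs = torch.rand(B, E)
targets = torch.randint(0, C, (B,))

logits = fc(inputs)                           # the addmm in call()
preds = torch.argmax(logits, dim=1)           # triton_poi_fused_argmax_0
loss = F.cross_entropy(logits, targets)       # fused log_softmax + reduction
normalized = F.normalize(inputs, p=2, dim=1)  # triton_poi_fused_div_3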
CrossEntropyLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/u2/cu2beycg2t2ghizs6f4qom7bxbxmajhdaakuyq6y2korxywhp6ba.py # Topologically Sorted Source Nodes: [output], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # output => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 
= tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/6y/c6yqx6y2l4x77lf42yz32g5yutuu76ggod5sfsraeeptlvp332df.py # Topologically Sorted Source Nodes: [long, max_1, nll_loss], Original ATen: [aten._to_copy, aten.max, aten.nll_loss2d_forward] # Source node to ATen node mapping: # long => convert_element_type # max_1 => max_1 # nll_loss => convert_element_type_1, div, full_default_1, ne_1, ne_2, neg, sum_2, sum_3, where_1 # Graph fragment: # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%arg0_1, torch.int64), kwargs = {}) # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%convert_element_type, 1), kwargs = {}) # %ne_1 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%getitem_1, -100), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%squeeze,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%ne_1, %neg, %full_default_1), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%where_1,), kwargs = {}) # %ne_2 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%getitem_1, -100), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%ne_2,), kwargs = {}) # %convert_element_type_1 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%sum_2, torch.float32), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, %convert_element_type_1), kwargs = {}) triton_per_fused__to_copy_max_nll_loss2d_forward_1 = async_compile.triton('triton_per_fused__to_copy_max_nll_loss2d_forward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_max_nll_loss2d_forward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 2, 
'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__to_copy_max_nll_loss2d_forward_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp13 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp23 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp42 = tl.load(in_ptr1 + (r0 + (64*r1)), None) tmp44 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None) tmp47 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None) tmp50 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None) tmp1 = tmp0.to(tl.int64) tmp3 = tmp2.to(tl.int64) tmp4 = tmp1 > tmp3 tmp5 = tmp1 == tmp3 tmp6 = tl.full([1, 1], 0, tl.int64) tmp7 = tl.full([1, 1], 1, tl.int64) tmp8 = tmp6 < tmp7 tmp9 = tmp5 & tmp8 tmp10 = tmp4 | tmp9 tmp11 = tl.where(tmp10, tmp1, tmp3) tmp12 = tl.where(tmp10, tmp6, tmp7) tmp14 = tmp13.to(tl.int64) tmp15 = tmp11 > tmp14 tmp16 = tmp11 == tmp14 tmp17 = tl.full([1, 1], 2, tl.int64) tmp18 = tmp12 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tmp15 | tmp19 tmp21 = tl.where(tmp20, tmp11, tmp14) tmp22 = tl.where(tmp20, tmp12, tmp17) tmp24 = tmp23.to(tl.int64) tmp25 = tmp21 > tmp24 tmp26 = tmp21 == tmp24 tmp27 = tl.full([1, 1], 3, tl.int64) tmp28 = tmp22 < tmp27 tmp29 = tmp26 & tmp28 tmp30 = tmp25 | tmp29 tmp31 = tl.where(tmp30, tmp21, tmp24) tmp32 = tl.where(tmp30, tmp22, tmp27) tmp33 = tl.full([1, 1], -100, tl.int64) tmp34 = tmp32 != tmp33 tmp35 = tl.where(tmp34, tmp32, tmp6) tmp36 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp37 = tmp35 + tmp36 tmp38 = tmp35 < 0 tmp39 = tl.where(tmp38, tmp37, tmp35) tl.device_assert((0 <= tmp39) & (tmp39 < 4), "index out of bounds: 0 <= tmp39 < 4") tmp41 = tl.load(in_ptr1 + (r0 + (16*tmp39) + (64*r1)), None) tmp43 = tl_math.exp(tmp42) tmp45 = tl_math.exp(tmp44) tmp46 = tmp43 + tmp45 tmp48 = tl_math.exp(tmp47) tmp49 = tmp46 + tmp48 tmp51 = tl_math.exp(tmp50) tmp52 = tmp49 + tmp51 tmp53 = tl_math.log(tmp52) tmp54 = tmp41 - tmp53 tmp55 = -tmp54 tmp56 = 0.0 tmp57 = tl.where(tmp34, tmp55, tmp56) tmp58 = tl.broadcast_to(tmp57, [XBLOCK, RBLOCK]) tmp60 = tl.sum(tmp58, 1)[:, None] tmp61 = tmp34.to(tl.int64) tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK]) tmp64 = tl.sum(tmp62, 1)[:, None] tmp65 = tmp64.to(tl.float32) tmp66 = tmp60 / tmp65 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp66, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output], Original ATen: 
[aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(arg1_1, buf1, 256, grid=grid(256), stream=stream0) del arg1_1 buf2 = empty_strided_cuda((), (), torch.float32) buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [long, max_1, nll_loss], Original ATen: [aten._to_copy, aten.max, aten.nll_loss2d_forward] triton_per_fused__to_copy_max_nll_loss2d_forward_1.run(buf4, arg0_1, buf1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 del buf1 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.utils.data import torch import torch.nn as nn class CrossEntropyLoss(nn.Module): def __init__(self, label_nc): super(CrossEntropyLoss, self).__init__() self.softmax = nn.LogSoftmax(dim=1) self.criterion = nn.NLLLoss2d() def forward(self, output, label): label = label.long().max(1)[1] output = self.softmax(output) return self.criterion(output, label) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'label_nc': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__to_copy_max_nll_loss2d_forward_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp13 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp23 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp42 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp44 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp47 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp50 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp1 = tmp0.to(tl.int64) tmp3 = tmp2.to(tl.int64) tmp4 = tmp1 > tmp3 tmp5 = tmp1 == tmp3 tmp6 = tl.full([1, 1], 0, tl.int64) tmp7 = tl.full([1, 1], 1, tl.int64) tmp8 = tmp6 < tmp7 tmp9 = tmp5 & tmp8 tmp10 = tmp4 | tmp9 tmp11 = tl.where(tmp10, tmp1, tmp3) tmp12 = tl.where(tmp10, tmp6, tmp7) tmp14 = tmp13.to(tl.int64) tmp15 = tmp11 > tmp14 tmp16 = tmp11 == tmp14 tmp17 = tl.full([1, 1], 2, tl.int64) tmp18 = tmp12 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tmp15 | tmp19 tmp21 = tl.where(tmp20, tmp11, tmp14) tmp22 = tl.where(tmp20, tmp12, tmp17) tmp24 = tmp23.to(tl.int64) tmp25 = tmp21 > tmp24 tmp26 = tmp21 == tmp24 tmp27 = tl.full([1, 1], 3, tl.int64) tmp28 = tmp22 < tmp27 tmp29 = tmp26 & tmp28 tmp30 = tmp25 | tmp29 tl.where(tmp30, tmp21, tmp24) tmp32 = tl.where(tmp30, tmp22, tmp27) tmp33 = tl.full([1, 1], -100, tl.int64) tmp34 = tmp32 != tmp33 tmp35 = tl.where(tmp34, tmp32, tmp6) tmp36 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp37 = tmp35 + tmp36 tmp38 = tmp35 < 0 tmp39 = tl.where(tmp38, tmp37, tmp35) tl.device_assert((0 <= tmp39) & (tmp39 < 4), 'index out of bounds: 0 <= tmp39 < 4') tmp41 = tl.load(in_ptr1 + (r0 + 16 * tmp39 + 64 * r1), None) tmp43 = tl_math.exp(tmp42) tmp45 = tl_math.exp(tmp44) tmp46 = tmp43 + tmp45 tmp48 = tl_math.exp(tmp47) tmp49 = tmp46 + tmp48 tmp51 = tl_math.exp(tmp50) tmp52 = tmp49 + tmp51 tmp53 = tl_math.log(tmp52) tmp54 = tmp41 - tmp53 tmp55 = -tmp54 tmp56 = 0.0 tmp57 = tl.where(tmp34, tmp55, tmp56) tmp58 = 
tl.broadcast_to(tmp57, [XBLOCK, RBLOCK]) tmp60 = tl.sum(tmp58, 1)[:, None] tmp61 = tmp34.to(tl.int64) tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK]) tmp64 = tl.sum(tmp62, 1)[:, None] tmp65 = tmp64.to(tl.float32) tmp66 = tmp60 / tmp65 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp66, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((), (), torch.float32) buf4 = buf2 del buf2 triton_per_fused__to_copy_max_nll_loss2d_forward_1[grid(1)](buf4, arg0_1, buf1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del buf1 return buf4, class CrossEntropyLossNew(nn.Module): def __init__(self, label_nc): super(CrossEntropyLossNew, self).__init__() self.softmax = nn.LogSoftmax(dim=1) self.criterion = nn.NLLLoss2d() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
WeisiX/ITAS3D
CrossEntropyLoss
false
18,085
[ "MIT" ]
4
fc861e0cb2d4516905bfadab5e5e880c2b021832
https://github.com/WeisiX/ITAS3D/tree/fc861e0cb2d4516905bfadab5e5e880c2b021832
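The reduction kernel fuses three steps of the eager forward: converting the channel-encoded label to class indices via long().max(1)[1], gathering the target log-probabilities, and the NLL mean reduction with the default ignore_index of -100. An eager sketch of the same pipeline; nn.NLLLoss is used here in place of the deprecated nn.NLLLoss2d, with identical behavior on 4-D inputs:

import torch
import torch.nn as nn

output = torch.rand(4, 4, 4, 4)           # [N, C, H, W] raw scores
label = torch.rand(4, 4, 4, 4)            # channel-encoded labels (get_inputs)

target = label.long().max(1)[1]           # [N, H, W] class indices (fused max)
log_probs = nn.LogSoftmax(dim=1)(output)  # triton_poi_fused__log_softmax_0
loss = nn.NLLLoss()(log_probs, target)    # mean over pixels, ignore_index=-100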
Mask_BN
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_2/inductor_cache/gw/cgwinrfro7i6rzwhk6fsjeingec3h5jmw252bsucbmwnwmxevsep.py
# Topologically Sorted Source Nodes: [x_mask, sum_1, sum_2], Original ATen: [aten.ne, aten.sum]
# Source node to ATen node mapping:
#   sum_1 => sum_1
#   sum_2 => sum_2
#   x_mask => ne
# Graph fragment:
#   %ne : [num_users=3] = call_function[target=torch.ops.aten.ne.Scalar](args = (%arg0_1, 0), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%ne, [3]), kwargs = {})
#   %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_1, [2]), kwargs = {})
triton_poi_fused_ne_sum_0 = async_compile.triton('triton_poi_fused_ne_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_ne_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_ne_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp32 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp35 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp39 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp43 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp48 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp51 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp55 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp59 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp1 = 0.0
    tmp2 = tmp0 != tmp1
    tmp3 = tmp2.to(tl.int64)
    tmp5 = tmp4 != tmp1
    tmp6 = tmp5.to(tl.int64)
    tmp7 = tmp3 + tmp6
    tmp9 = tmp8 != tmp1
    tmp10 = tmp9.to(tl.int64)
    tmp11 = tmp7 + tmp10
    tmp13 = tmp12 != tmp1
    tmp14 = tmp13.to(tl.int64)
    tmp15 = tmp11 + tmp14
    tmp17 = tmp16 != tmp1
    tmp18 = tmp17.to(tl.int64)
    tmp20 = tmp19 != tmp1
    tmp21 = tmp20.to(tl.int64)
    tmp22 = tmp18 + tmp21
    tmp24 = tmp23 != tmp1
    tmp25 = tmp24.to(tl.int64)
    tmp26 = tmp22 + tmp25
    tmp28 = tmp27 != tmp1
    tmp29 = tmp28.to(tl.int64)
    tmp30 = tmp26 + tmp29
    tmp31 = tmp15 + tmp30
    tmp33 = tmp32 != tmp1
    tmp34 = tmp33.to(tl.int64)
    tmp36 = tmp35 != tmp1
    tmp37 = tmp36.to(tl.int64)
    tmp38 = tmp34 + tmp37
    tmp40 = tmp39 != tmp1
    tmp41 = tmp40.to(tl.int64)
    tmp42 = tmp38 + tmp41
    tmp44 = tmp43 != tmp1
    tmp45 = tmp44.to(tl.int64)
    tmp46 = tmp42 + tmp45
    tmp47 = tmp31 + tmp46
    tmp49 = tmp48 != tmp1
    tmp50 = tmp49.to(tl.int64)
    tmp52 = tmp51 != tmp1
    tmp53 = tmp52.to(tl.int64)
    tmp54 = tmp50 + tmp53
    tmp56 = tmp55 != tmp1
    tmp57 = tmp56.to(tl.int64)
    tmp58 = tmp54 + tmp57
    tmp60 = tmp59 != tmp1
    tmp61 = tmp60.to(tl.int64)
    tmp62 = tmp58 + tmp61
    tmp63 = tmp47 + tmp62
    tl.store(out_ptr0 + (x0), tmp63, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_2/inductor_cache/cp/ccpkt3bn7agbgulam3m44q7ckanvvx5zq6yirkss6enytt5u77uf.py
# Topologically Sorted Source Nodes: [x_mask, mul, x_centralization, sum_4, none_zero_sum, mul_1, x_mean], Original ATen: [aten.ne, aten.mul, aten.sub, aten.sum, aten.div]
# Source node to ATen node mapping:
#   mul => mul
#   mul_1 => mul_1
#   none_zero_sum => sum_5
#   sum_4 => sum_4
#   x_centralization => sub
#   x_mask => ne
#   x_mean => div
# Graph fragment:
#   %ne : [num_users=3] = call_function[target=torch.ops.aten.ne.Scalar](args = (%arg0_1, 0), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%ne, %unsqueeze), kwargs = {})
#   %sub : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mul), kwargs = {})
#   %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub, [2]), kwargs = {})
#   %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_4, [1]), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_1, 0.5), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_5, %mul_1), kwargs = {})
triton_poi_fused_div_mul_ne_sub_sum_1 = async_compile.triton('triton_poi_fused_div_mul_ne_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_ne_sub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_mul_ne_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = (xindex // 4)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
    tmp6 = tl.load(in_ptr0 + (4 + x0 + (64*x1)), xmask)
    tmp12 = tl.load(in_ptr0 + (8 + x0 + (64*x1)), xmask)
    tmp18 = tl.load(in_ptr0 + (12 + x0 + (64*x1)), xmask)
    tmp24 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
    tmp29 = tl.load(in_ptr0 + (20 + x0 + (64*x1)), xmask)
    tmp35 = tl.load(in_ptr0 + (24 + x0 + (64*x1)), xmask)
    tmp41 = tl.load(in_ptr0 + (28 + x0 + (64*x1)), xmask)
    tmp48 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
    tmp53 = tl.load(in_ptr0 + (36 + x0 + (64*x1)), xmask)
    tmp59 = tl.load(in_ptr0 + (40 + x0 + (64*x1)), xmask)
    tmp65 = tl.load(in_ptr0 + (44 + x0 + (64*x1)), xmask)
    tmp72 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
    tmp77 = tl.load(in_ptr0 + (52 + x0 + (64*x1)), xmask)
    tmp83 = tl.load(in_ptr0 + (56 + x0 + (64*x1)), xmask)
    tmp89 = tl.load(in_ptr0 + (60 + x0 + (64*x1)), xmask)
    tmp96 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
    tmp97 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp99 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp101 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp1 = 0.0
    tmp2 = tmp0 != tmp1
    tmp3 = tmp2.to(tl.float32)
    tmp4 = tmp3 * tmp0
    tmp5 = tmp0 - tmp4
    tmp7 = tmp6 != tmp1
    tmp8 = tmp7.to(tl.float32)
    tmp9 = tmp8 * tmp6
    tmp10 = tmp6 - tmp9
    tmp11 = tmp5 + tmp10
    tmp13 = tmp12 != tmp1
    tmp14 = tmp13.to(tl.float32)
    tmp15 = tmp14 * tmp12
    tmp16 = tmp12 - tmp15
    tmp17 = tmp11 + tmp16
    tmp19 = tmp18 != tmp1
    tmp20 = tmp19.to(tl.float32)
    tmp21 = tmp20 * tmp18
    tmp22 = tmp18 - tmp21
    tmp23 = tmp17 + tmp22
    tmp25 = tmp24 != tmp1
    tmp26 = tmp25.to(tl.float32)
    tmp27 = tmp26 * tmp0
    tmp28 = tmp24 - tmp27
    tmp30 = tmp29 != tmp1
    tmp31 = tmp30.to(tl.float32)
    tmp32 = tmp31 * tmp6
    tmp33 = tmp29 - tmp32
    tmp34 = tmp28 + tmp33
    tmp36 = tmp35 != tmp1
    tmp37 = tmp36.to(tl.float32)
    tmp38 = tmp37 * tmp12
    tmp39 = tmp35 - tmp38
    tmp40 = tmp34 + tmp39
    tmp42 = tmp41 != tmp1
    tmp43 = tmp42.to(tl.float32)
    tmp44 = tmp43 * tmp18
    tmp45 = tmp41 - tmp44
    tmp46 = tmp40 + tmp45
    tmp47 = tmp23 + tmp46
    tmp49 = tmp48 != tmp1
    tmp50 = tmp49.to(tl.float32)
    tmp51 = tmp50 * tmp0
    tmp52 = tmp48 - tmp51
    tmp54 = tmp53 != tmp1
    tmp55 = tmp54.to(tl.float32)
    tmp56 = tmp55 * tmp6
    tmp57 = tmp53 - tmp56
    tmp58 = tmp52 + tmp57
    tmp60 = tmp59 != tmp1
    tmp61 = tmp60.to(tl.float32)
    tmp62 = tmp61 * tmp12
    tmp63 = tmp59 - tmp62
    tmp64 = tmp58 + tmp63
    tmp66 = tmp65 != tmp1
    tmp67 = tmp66.to(tl.float32)
    tmp68 = tmp67 * tmp18
    tmp69 = tmp65 - tmp68
    tmp70 = tmp64 + tmp69
    tmp71 = tmp47 + tmp70
    tmp73 = tmp72 != tmp1
    tmp74 = tmp73.to(tl.float32)
    tmp75 = tmp74 * tmp0
    tmp76 = tmp72 - tmp75
    tmp78 = tmp77 != tmp1
    tmp79 = tmp78.to(tl.float32)
    tmp80 = tmp79 * tmp6
    tmp81 = tmp77 - tmp80
    tmp82 = tmp76 + tmp81
    tmp84 = tmp83 != tmp1
    tmp85 = tmp84.to(tl.float32)
    tmp86 = tmp85 * tmp12
    tmp87 = tmp83 - tmp86
    tmp88 = tmp82 + tmp87
    tmp90 = tmp89 != tmp1
    tmp91 = tmp90.to(tl.float32)
    tmp92 = tmp91 * tmp18
    tmp93 = tmp89 - tmp92
    tmp94 = tmp88 + tmp93
    tmp95 = tmp71 + tmp94
    tmp98 = tmp96 + tmp97
    tmp100 = tmp98 + tmp99
    tmp102 = tmp100 + tmp101
    tmp103 = tmp102.to(tl.float32)
    tmp104 = 0.5
    tmp105 = tmp103 * tmp104
    tmp106 = tmp95 / tmp105
    tl.store(in_out_ptr0 + (x2), tmp106, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_2/inductor_cache/nb/cnbz3iw2gpugip67l6h2otza3fqf2f4j2owxudywyisgy2riha6f.py
# Topologically Sorted Source Nodes: [x_mask, mul, x_centralization, mu, sub_1, pow_1, sum_6], Original ATen: [aten.ne, aten.mul, aten.sub, aten.pow, aten.sum]
# Source node to ATen node mapping:
#   mu => mul_2
#   mul => mul
#   pow_1 => pow_1
#   sub_1 => sub_1
#   sum_6 => sum_6
#   x_centralization => sub
#   x_mask => ne
# Graph fragment:
#   %ne : [num_users=3] = call_function[target=torch.ops.aten.ne.Scalar](args = (%arg0_1, 0), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%ne, %unsqueeze), kwargs = {})
#   %sub : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mul), kwargs = {})
#   %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_3, %ne), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %mul_2), kwargs = {})
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
#   %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2]), kwargs = {})
triton_poi_fused_mul_ne_pow_sub_sum_2 = async_compile.triton('triton_poi_fused_mul_ne_pow_sub_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_ne_pow_sub_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_ne_pow_sub_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x3 = (xindex // 4)
    x2 = (xindex // 16)
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (16*x3)), xmask)
    tmp4 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (4 + x0 + (16*x3)), xmask)
    tmp14 = tl.load(in_ptr0 + (4 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (8 + x0 + (16*x3)), xmask)
    tmp24 = tl.load(in_ptr0 + (8 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp31 = tl.load(in_ptr0 + (12 + x0 + (16*x3)), xmask)
    tmp34 = tl.load(in_ptr0 + (12 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
    tmp1 = 0.0
    tmp2 = tmp0 != tmp1
    tmp3 = tmp2.to(tl.float32)
    tmp5 = tmp3 * tmp4
    tmp6 = tmp0 - tmp5
    tmp8 = tmp7 * tmp3
    tmp9 = tmp6 - tmp8
    tmp10 = tmp9 * tmp9
    tmp12 = tmp11 != tmp1
    tmp13 = tmp12.to(tl.float32)
    tmp15 = tmp13 * tmp14
    tmp16 = tmp11 - tmp15
    tmp17 = tmp7 * tmp13
    tmp18 = tmp16 - tmp17
    tmp19 = tmp18 * tmp18
    tmp20 = tmp10 + tmp19
    tmp22 = tmp21 != tmp1
    tmp23 = tmp22.to(tl.float32)
    tmp25 = tmp23 * tmp24
    tmp26 = tmp21 - tmp25
    tmp27 = tmp7 * tmp23
    tmp28 = tmp26 - tmp27
    tmp29 = tmp28 * tmp28
    tmp30 = tmp20 + tmp29
    tmp32 = tmp31 != tmp1
    tmp33 = tmp32.to(tl.float32)
    tmp35 = tmp33 * tmp34
    tmp36 = tmp31 - tmp35
    tmp37 = tmp7 * tmp33
    tmp38 = tmp36 - tmp37
    tmp39 = tmp38 * tmp38
    tmp40 = tmp30 + tmp39
    tl.store(out_ptr0 + (x4), tmp40, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_2/inductor_cache/bd/cbdskegfemnozmzlyidswziu5dx3mita3tmqef4esocl7qofah7b.py
# Topologically Sorted Source Nodes: [pow_2], Original ATen: [aten.pow]
# Source node to ATen node mapping:
#   pow_2 => pow_2
# Graph fragment:
#   %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%unsqueeze_5, 0.5), kwargs = {})
triton_poi_fused_pow_3 = async_compile.triton('triton_poi_fused_pow_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_pow_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = (xindex // 4)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
    tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
    tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
    tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
    tmp7 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp9 = tmp7 + tmp8
    tmp11 = tmp9 + tmp10
    tmp13 = tmp11 + tmp12
    tmp14 = tmp13.to(tl.float32)
    tmp15 = tmp6 / tmp14
    tmp16 = libdevice.sqrt(tmp15)
    tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_2/inductor_cache/zi/czi5emogn6womdlbbnjoooxluyqy3isbl4gpypc23kz24ojplqny.py
# Topologically Sorted Source Nodes: [x_mask, mul, x_centralization, mu, sub_2, pow_2, bn_x], Original ATen: [aten.ne, aten.mul, aten.sub, aten.pow, aten.div]
# Source node to ATen node mapping:
#   bn_x => div_2
#   mu => mul_2
#   mul => mul
#   pow_2 => pow_2
#   sub_2 => sub_2
#   x_centralization => sub
#   x_mask => ne
# Graph fragment:
#   %ne : [num_users=3] = call_function[target=torch.ops.aten.ne.Scalar](args = (%arg0_1, 0), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%ne, %unsqueeze), kwargs = {})
#   %sub : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mul), kwargs = {})
#   %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_3, %ne), kwargs = {})
#   %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %mul_2), kwargs = {})
#   %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%unsqueeze_5, 0.5), kwargs = {})
#   %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, %pow_2), kwargs = {})
triton_poi_fused_div_mul_ne_pow_sub_4 = async_compile.triton('triton_poi_fused_div_mul_ne_pow_sub_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_ne_pow_sub_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_mul_ne_pow_sub_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x3 = (xindex // 64)
    x5 = xindex % 16
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + (x4), xmask)
    tmp4 = tl.load(in_ptr0 + (x5 + (64*x3)), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (x0 + (4*x3)), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr2 + (x0 + (4*x3)), xmask, eviction_policy='evict_last')
    tmp1 = 0.0
    tmp2 = tmp0 != tmp1
    tmp3 = tmp2.to(tl.float32)
    tmp5 = tmp3 * tmp4
    tmp6 = tmp0 - tmp5
    tmp8 = tmp7 * tmp3
    tmp9 = tmp6 - tmp8
    tmp11 = tmp9 / tmp10
    tl.store(out_ptr0 + (x4), tmp11, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
        # Topologically Sorted Source Nodes: [x_mask, sum_1, sum_2], Original ATen: [aten.ne, aten.sum]
        stream0 = get_raw_stream(0)
        triton_poi_fused_ne_sum_0.run(arg0_1, buf1, 16, grid=grid(16), stream=stream0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf2 = buf0; del buf0  # reuse
        # Topologically Sorted Source Nodes: [x_mask, mul, x_centralization, sum_4, none_zero_sum, mul_1, x_mean], Original ATen: [aten.ne, aten.mul, aten.sub, aten.sum, aten.div]
        triton_poi_fused_div_mul_ne_sub_sum_1.run(buf2, arg0_1, buf1, 16, grid=grid(16), stream=stream0)
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_mask, mul, x_centralization, mu, sub_1, pow_1, sum_6], Original ATen: [aten.ne, aten.mul, aten.sub, aten.pow, aten.sum]
        triton_poi_fused_mul_ne_pow_sub_sum_2.run(arg0_1, buf2, buf3, 64, grid=grid(64), stream=stream0)
        buf4 = empty_strided_cuda((4, 1, 1, 4), (4, 16, 16, 1), torch.float32)
        # Topologically Sorted Source Nodes: [pow_2], Original ATen: [aten.pow]
        triton_poi_fused_pow_3.run(buf3, buf1, buf4, 16, grid=grid(16), stream=stream0)
        del buf1
        del buf3
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_mask, mul, x_centralization, mu, sub_2, pow_2, bn_x], Original ATen: [aten.ne, aten.mul, aten.sub, aten.pow, aten.div]
        triton_poi_fused_div_mul_ne_pow_sub_4.run(arg0_1, buf2, buf4, buf5, 256, grid=grid(256), stream=stream0)
        del arg0_1
        del buf2
        del buf4
    return (buf5, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
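The generated module can also be smoke-tested directly, without the benchmark harness. A minimal sketch, assuming the file above is saved and importable as `compiled_mask_bn` (a hypothetical module name) and a CUDA device is available; `call` expects a single contiguous (4, 4, 4, 4) float32 CUDA tensor and consumes its argument list, so a throwaway clone is passed in:

# Hypothetical usage; `compiled_mask_bn` is an assumed name for the file above.
import torch
from compiled_mask_bn import call

x = torch.rand((4, 4, 4, 4), device='cuda')
out, = call([x.clone()])   # call() clears the list and frees its input
assert out.shape == (4, 4, 4, 4) and out.dtype == torch.float32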
import torch
import torch.nn as nn


class Mask_BN(nn.Module):

    def __init__(self):
        super(Mask_BN, self).__init__()

    def forward(self, x):
        # Non-zero entries are valid; exact zeros are treated as padding.
        x_mask = x != 0
        # Subtract the channel-0 value from every channel, only at valid positions.
        x_centralization = x - x_mask * x[:, 0, :, :].unsqueeze(1)
        # Number of valid entries per sample, shape (B, 1).
        none_zero_n = x_mask.sum(axis=3).sum(axis=2).sum(axis=1).unsqueeze(1)
        # Masked mean of the centralized values over dims 1 and 2 (with a 0.5 factor).
        none_zero_sum = x_centralization.sum(axis=2).sum(axis=1)
        x_mean = none_zero_sum / (none_zero_n * 0.5)
        mu = x_mean.unsqueeze(1).unsqueeze(2) * x_mask
        # Masked variance over dims 1 and 2, then normalize.
        var = (((x_centralization - mu) ** 2).sum(axis=2).sum(axis=1) /
               none_zero_n).unsqueeze(1).unsqueeze(2)
        bn_x = (x_centralization - mu) / var ** 0.5
        return bn_x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
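As a quick behavioral check of `Mask_BN` as defined above (an illustrative sketch, not part of the original repo): entries that are exactly zero are treated as padding, contribute nothing to the masked statistics, and pass through the normalization unchanged:

# Sanity-check sketch for Mask_BN; `m` and `x` are made up for illustration.
m = Mask_BN()
x = torch.rand(4, 4, 4, 4) + 0.1   # strictly positive, so nothing is masked yet
x[0, 1, 2, 3] = 0.0                # mark a single entry as padding
y = m(x)
assert y[0, 1, 2, 3] == 0          # the padded entry stays exactly zero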
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


# Counts the non-zero entries of each (H, W) slice: x_mask.sum(3).sum(2).
@triton.jit
def triton_poi_fused_ne_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp32 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp35 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp39 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp43 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp48 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp51 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp55 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp59 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp1 = 0.0
    tmp2 = tmp0 != tmp1
    tmp3 = tmp2.to(tl.int64)
    tmp5 = tmp4 != tmp1
    tmp6 = tmp5.to(tl.int64)
    tmp7 = tmp3 + tmp6
    tmp9 = tmp8 != tmp1
    tmp10 = tmp9.to(tl.int64)
    tmp11 = tmp7 + tmp10
    tmp13 = tmp12 != tmp1
    tmp14 = tmp13.to(tl.int64)
    tmp15 = tmp11 + tmp14
    tmp17 = tmp16 != tmp1
    tmp18 = tmp17.to(tl.int64)
    tmp20 = tmp19 != tmp1
    tmp21 = tmp20.to(tl.int64)
    tmp22 = tmp18 + tmp21
    tmp24 = tmp23 != tmp1
    tmp25 = tmp24.to(tl.int64)
    tmp26 = tmp22 + tmp25
    tmp28 = tmp27 != tmp1
    tmp29 = tmp28.to(tl.int64)
    tmp30 = tmp26 + tmp29
    tmp31 = tmp15 + tmp30
    tmp33 = tmp32 != tmp1
    tmp34 = tmp33.to(tl.int64)
    tmp36 = tmp35 != tmp1
    tmp37 = tmp36.to(tl.int64)
    tmp38 = tmp34 + tmp37
    tmp40 = tmp39 != tmp1
    tmp41 = tmp40.to(tl.int64)
    tmp42 = tmp38 + tmp41
    tmp44 = tmp43 != tmp1
    tmp45 = tmp44.to(tl.int64)
    tmp46 = tmp42 + tmp45
    tmp47 = tmp31 + tmp46
    tmp49 = tmp48 != tmp1
    tmp50 = tmp49.to(tl.int64)
    tmp52 = tmp51 != tmp1
    tmp53 = tmp52.to(tl.int64)
    tmp54 = tmp50 + tmp53
    tmp56 = tmp55 != tmp1
    tmp57 = tmp56.to(tl.int64)
    tmp58 = tmp54 + tmp57
    tmp60 = tmp59 != tmp1
    tmp61 = tmp60.to(tl.int64)
    tmp62 = tmp58 + tmp61
    tmp63 = tmp47 + tmp62
    tl.store(out_ptr0 + x0, tmp63, xmask)


# Fuses the centralization (x - mask * x[:, 0]) with the masked mean x_mean,
# dividing the summed centralized values by (non-zero count * 0.5).
@triton.jit
def triton_poi_fused_div_mul_ne_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp6 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask)
    tmp12 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask)
    tmp18 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask)
    tmp24 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp29 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask)
    tmp35 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask)
    tmp41 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask)
    tmp48 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp53 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask)
    tmp59 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask)
    tmp65 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask)
    tmp72 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp77 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask)
    tmp83 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask)
    tmp89 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask)
    tmp96 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp97 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp99 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp101 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp1 = 0.0
    tmp2 = tmp0 != tmp1
    tmp3 = tmp2.to(tl.float32)
    tmp4 = tmp3 * tmp0
    tmp5 = tmp0 - tmp4
    tmp7 = tmp6 != tmp1
    tmp8 = tmp7.to(tl.float32)
    tmp9 = tmp8 * tmp6
    tmp10 = tmp6 - tmp9
    tmp11 = tmp5 + tmp10
    tmp13 = tmp12 != tmp1
    tmp14 = tmp13.to(tl.float32)
    tmp15 = tmp14 * tmp12
    tmp16 = tmp12 - tmp15
    tmp17 = tmp11 + tmp16
    tmp19 = tmp18 != tmp1
    tmp20 = tmp19.to(tl.float32)
    tmp21 = tmp20 * tmp18
    tmp22 = tmp18 - tmp21
    tmp23 = tmp17 + tmp22
    tmp25 = tmp24 != tmp1
    tmp26 = tmp25.to(tl.float32)
    tmp27 = tmp26 * tmp0
    tmp28 = tmp24 - tmp27
    tmp30 = tmp29 != tmp1
    tmp31 = tmp30.to(tl.float32)
    tmp32 = tmp31 * tmp6
    tmp33 = tmp29 - tmp32
    tmp34 = tmp28 + tmp33
    tmp36 = tmp35 != tmp1
    tmp37 = tmp36.to(tl.float32)
    tmp38 = tmp37 * tmp12
    tmp39 = tmp35 - tmp38
    tmp40 = tmp34 + tmp39
    tmp42 = tmp41 != tmp1
    tmp43 = tmp42.to(tl.float32)
    tmp44 = tmp43 * tmp18
    tmp45 = tmp41 - tmp44
    tmp46 = tmp40 + tmp45
    tmp47 = tmp23 + tmp46
    tmp49 = tmp48 != tmp1
    tmp50 = tmp49.to(tl.float32)
    tmp51 = tmp50 * tmp0
    tmp52 = tmp48 - tmp51
    tmp54 = tmp53 != tmp1
    tmp55 = tmp54.to(tl.float32)
    tmp56 = tmp55 * tmp6
    tmp57 = tmp53 - tmp56
    tmp58 = tmp52 + tmp57
    tmp60 = tmp59 != tmp1
    tmp61 = tmp60.to(tl.float32)
    tmp62 = tmp61 * tmp12
    tmp63 = tmp59 - tmp62
    tmp64 = tmp58 + tmp63
    tmp66 = tmp65 != tmp1
    tmp67 = tmp66.to(tl.float32)
    tmp68 = tmp67 * tmp18
    tmp69 = tmp65 - tmp68
    tmp70 = tmp64 + tmp69
    tmp71 = tmp47 + tmp70
    tmp73 = tmp72 != tmp1
    tmp74 = tmp73.to(tl.float32)
    tmp75 = tmp74 * tmp0
    tmp76 = tmp72 - tmp75
    tmp78 = tmp77 != tmp1
    tmp79 = tmp78.to(tl.float32)
    tmp80 = tmp79 * tmp6
    tmp81 = tmp77 - tmp80
    tmp82 = tmp76 + tmp81
    tmp84 = tmp83 != tmp1
    tmp85 = tmp84.to(tl.float32)
    tmp86 = tmp85 * tmp12
    tmp87 = tmp83 - tmp86
    tmp88 = tmp82 + tmp87
    tmp90 = tmp89 != tmp1
    tmp91 = tmp90.to(tl.float32)
    tmp92 = tmp91 * tmp18
    tmp93 = tmp89 - tmp92
    tmp94 = tmp88 + tmp93
    tmp95 = tmp71 + tmp94
    tmp98 = tmp96 + tmp97
    tmp100 = tmp98 + tmp99
    tmp102 = tmp100 + tmp101
    tmp103 = tmp102.to(tl.float32)
    tmp104 = 0.5
    tmp105 = tmp103 * tmp104
    tmp106 = tmp95 / tmp105
    tl.store(in_out_ptr0 + x2, tmp106, xmask)


# Accumulates the squared deviations (x_centralization - mu) ** 2 over dim 2.
@triton.jit
def triton_poi_fused_mul_ne_pow_sub_sum_2(in_ptr0, in_ptr1, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x3 = xindex // 4
    x2 = xindex // 16
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * x3), xmask)
    tmp4 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (4 + x0 + 16 * x3), xmask)
    tmp14 = tl.load(in_ptr0 + (4 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (8 + x0 + 16 * x3), xmask)
    tmp24 = tl.load(in_ptr0 + (8 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp31 = tl.load(in_ptr0 + (12 + x0 + 16 * x3), xmask)
    tmp34 = tl.load(in_ptr0 + (12 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp1 = 0.0
    tmp2 = tmp0 != tmp1
    tmp3 = tmp2.to(tl.float32)
    tmp5 = tmp3 * tmp4
    tmp6 = tmp0 - tmp5
    tmp8 = tmp7 * tmp3
    tmp9 = tmp6 - tmp8
    tmp10 = tmp9 * tmp9
    tmp12 = tmp11 != tmp1
    tmp13 = tmp12.to(tl.float32)
    tmp15 = tmp13 * tmp14
    tmp16 = tmp11 - tmp15
    tmp17 = tmp7 * tmp13
    tmp18 = tmp16 - tmp17
    tmp19 = tmp18 * tmp18
    tmp20 = tmp10 + tmp19
    tmp22 = tmp21 != tmp1
    tmp23 = tmp22.to(tl.float32)
    tmp25 = tmp23 * tmp24
    tmp26 = tmp21 - tmp25
    tmp27 = tmp7 * tmp23
    tmp28 = tmp26 - tmp27
    tmp29 = tmp28 * tmp28
    tmp30 = tmp20 + tmp29
    tmp32 = tmp31 != tmp1
    tmp33 = tmp32.to(tl.float32)
    tmp35 = tmp33 * tmp34
    tmp36 = tmp31 - tmp35
    tmp37 = tmp7 * tmp33
    tmp38 = tmp36 - tmp37
    tmp39 = tmp38 * tmp38
    tmp40 = tmp30 + tmp39
    tl.store(out_ptr0 + x4, tmp40, xmask)


# Sums the squared deviations over dim 1, divides by the total non-zero
# count, and takes the square root, producing the denominator var ** 0.5.
@triton.jit
def triton_poi_fused_pow_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
    tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
    tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
    tmp7 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp9 = tmp7 + tmp8
    tmp11 = tmp9 + tmp10
    tmp13 = tmp11 + tmp12
    tmp14 = tmp13.to(tl.float32)
    tmp15 = tmp6 / tmp14
    tmp16 = libdevice.sqrt(tmp15)
    tl.store(out_ptr0 + x2, tmp16, xmask)


# Final normalization: (x_centralization - mu) / var ** 0.5.
@triton.jit
def triton_poi_fused_div_mul_ne_pow_sub_4(in_ptr0, in_ptr1, in_ptr2,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x3 = xindex // 64
    x5 = xindex % 16
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x4, xmask)
    tmp4 = tl.load(in_ptr0 + (x5 + 64 * x3), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (x0 + 4 * x3), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr2 + (x0 + 4 * x3), xmask, eviction_policy='evict_last')
    tmp1 = 0.0
    tmp2 = tmp0 != tmp1
    tmp3 = tmp2.to(tl.float32)
    tmp5 = tmp3 * tmp4
    tmp6 = tmp0 - tmp5
    tmp8 = tmp7 * tmp3
    tmp9 = tmp6 - tmp8
    tmp11 = tmp9 / tmp10
    tl.store(out_ptr0 + x4, tmp11, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
        get_raw_stream(0)
        triton_poi_fused_ne_sum_0[grid(16)](arg0_1, buf1, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf2 = buf0
        del buf0
        triton_poi_fused_div_mul_ne_sub_sum_1[grid(16)](buf2, arg0_1, buf1,
            16, XBLOCK=16, num_warps=1, num_stages=1)
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_mul_ne_pow_sub_sum_2[grid(64)](arg0_1, buf2, buf3,
            64, XBLOCK=64, num_warps=1, num_stages=1)
        buf4 = empty_strided_cuda((4, 1, 1, 4), (4, 16, 16, 1), torch.float32)
        triton_poi_fused_pow_3[grid(16)](buf3, buf1, buf4, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del buf1
        del buf3
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_div_mul_ne_pow_sub_4[grid(256)](arg0_1, buf2, buf4,
            buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        del buf2
        del buf4
    return buf5,


class Mask_BNNew(nn.Module):

    def __init__(self):
        super(Mask_BNNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
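To tie the two listings together, a hedged parity check (a sketch, assuming `Mask_BN` from the previous listing and `Mask_BNNew` above are importable side by side and a CUDA device is present): the Triton-backed forward should reproduce the eager module up to float32 round-off:

# Parity-check sketch; strictly positive inputs avoid all-zero columns (0/0 -> nan).
import torch

x = torch.rand(4, 4, 4, 4, device='cuda') + 0.1
ref = Mask_BN()(x)
out = Mask_BNNew()(x)
assert torch.allclose(ref, out, atol=1e-5)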
XIAOYEJIAYOU/GSAN
Mask_BN
false
18086
["MIT"]
6
8ca4fdf4c3d615af9cc10e1f9f22ceb7e27fe196
https://github.com/XIAOYEJIAYOU/GSAN/tree/8ca4fdf4c3d615af9cc10e1f9f22ceb7e27fe196