Dataset columns, with the minimum/maximum lengths and value ranges reported by the dataset viewer:

| column               | type   | length / value range |
|----------------------|--------|----------------------|
| entry_point          | string | 1–65 chars           |
| original_triton_code | string | 4.5k–619k chars      |
| python_code          | string | 208–60.9k chars      |
| triton_code          | string | 1.15k–275k chars     |
| repo_name            | string | 7–115 chars          |
| module_name          | string | 1–65 chars           |
| synthetic            | bool   | 1 class              |
| uuid                 | int64  | 0–18.5k              |
| licenses             | list   | 1–6 entries          |
| stars                | int64  | 0–19.8k              |
| sha                  | string | 40 chars             |
| repo_link            | string | 72–180 chars         |
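Each record pairs an eager PyTorch module (`python_code`) with the raw Inductor AOT output for it (`original_triton_code`), a cleaned standalone lowering (`triton_code`), and provenance metadata. As a minimal access sketch, assuming the dump is materialized as a Hugging Face dataset (the dataset path below is a placeholder, not the real name):

```python
from datasets import load_dataset

# Placeholder path -- substitute the actual dataset identifier.
ds = load_dataset('username/pytorch-triton-modules', split='train')
row = ds[0]
print(row['entry_point'], row['repo_name'], row['stars'])
print(row['python_code'][:200])   # eager PyTorch reference
print(row['triton_code'][:200])   # standalone Triton lowering
```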
entry_point: SequenceClassifier
original_triton_code:

```python
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_4/inductor_cache/wj/cwj6mseepwwsl64zrhfvnzd4gkux6dcys7drtkhi2dsgv2axk6do.py
# Topologically Sorted Source Nodes: [pooled_sum, pooled_mean], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
#   pooled_mean => div
#   pooled_sum => sum_1
# Graph fragment:
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%primals_1, [1]), kwargs = {})
#   %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 4), kwargs = {})
triton_poi_fused_div_sum_0 = async_compile.triton('triton_poi_fused_div_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = (xindex // 16)
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
    tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
    tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
    tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 0.25
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/ey/cey6dsgmzj2byupf73e6nwt5fetf5ne2sa57kzcmy7ejvaqhqb72.py
# Topologically Sorted Source Nodes: [output_states_2], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   output_states_2 => relu
# Graph fragment:
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
#   %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + (x2), tmp4, xmask)
    tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/4n/c4nc446yqbrtfyayl4mzt4mxucu6lyinpbl5i77rrpgokfkjfnsn.py
# Topologically Sorted Source Nodes: [output_states_4], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
#   output_states_4 => amax, sub
# Graph fragment:
#   %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [-1], True), kwargs = {})
#   %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/e5/ce5suhs2e2ygw6kp4ycmsbsq4xfgw573srqfqshl4crsnmymkvfl.py
# Topologically Sorted Source Nodes: [output_states_4], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
#   output_states_4 => exp, log, sub_1, sum_2
# Graph fragment:
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
#   %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
#   %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_2,), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_3 = async_compile.triton('triton_poi_fused__log_softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp2 + tmp4
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp5 + tmp7
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tmp8 + tmp10
    tmp12 = tl_math.log(tmp11)
    tmp13 = tmp0 - tmp12
    tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, ), (1, ))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [pooled_sum, pooled_mean], Original ATen: [aten.sum, aten.div]
        stream0 = get_raw_stream(0)
        triton_poi_fused_div_sum_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
        del primals_1
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
        del primals_2
        buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1  # reuse
        buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [output_states_2], Original ATen: [aten.relu, aten.threshold_backward]
        triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_3, buf6, 64, grid=grid(64), stream=stream0)
        del primals_3
        buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [output_states_3], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
        del primals_5
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [output_states_4], Original ATen: [aten._log_softmax]
        triton_poi_fused__log_softmax_2.run(buf3, buf4, 64, grid=grid(64), stream=stream0)
        buf5 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0); del buf3  # reuse
        # Topologically Sorted Source Nodes: [output_states_4], Original ATen: [aten._log_softmax]
        triton_poi_fused__log_softmax_3.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
        del buf4
    return (buf5, buf0, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf5, primals_4, buf6, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
```
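The generated module benchmarks itself when run as a script, but `call` can also be driven directly, exactly as `benchmark_compiled_module` does. A minimal sketch, assuming the module above is saved/imported and a CUDA device is available:

```python
import torch
from torch._dynamo.testing import rand_strided

# Inputs must match the shapes/strides that call() asserts.
# Note: call() empties the argument list in place.
primals = [
    rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32),  # hidden_states
    rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32),  # layer0 weight
    rand_strided((4,), (1,), device='cuda:0', dtype=torch.float32),      # layer0 bias
    rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32),  # layer2 weight
    rand_strided((4,), (1,), device='cuda:0', dtype=torch.float32),      # layer2 bias
]
out = call(primals)
print(out[0].shape)  # (4, 4, 4) log-softmax logits; remaining outputs are saved for backward
```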
python_code:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


def transformer_weights_init(module, std_init_range=0.02, xavier=True):
    """
    Initialize different weights in Transformer model.

    Args:
        module: torch.nn.Module to be initialized
        std_init_range: standard deviation of normal initializer
        xavier: if True, xavier initializer will be used in Linear layers
            as was proposed in AIAYN paper, otherwise normal initializer
            will be used (like in BERT paper)
    """
    if isinstance(module, nn.Linear):
        if xavier:
            nn.init.xavier_uniform_(module.weight)
        else:
            nn.init.normal_(module.weight, mean=0.0, std=std_init_range)
        if module.bias is not None:
            nn.init.constant_(module.bias, 0.0)
    elif isinstance(module, nn.Embedding):
        nn.init.normal_(module.weight, mean=0.0, std=std_init_range)
    elif isinstance(module, nn.LayerNorm):
        nn.init.constant_(module.weight, 1.0)
        nn.init.constant_(module.bias, 0.0)


class SelfAttention(nn.Module):

    def __init__(self, hidden_size, batch_first=True):
        super(SelfAttention, self).__init__()
        self.hidden_size = hidden_size
        self.batch_first = batch_first
        self.register_parameter('att_weights', nn.Parameter(torch.Tensor(1, hidden_size), requires_grad=True))
        nn.init.xavier_uniform_(self.att_weights.data)

    def get_mask(self):
        pass

    def forward(self, hidden_states, attention_mask=None):
        if self.batch_first:
            batch_size, _max_len = hidden_states.size()[:2]
        else:
            _max_len, batch_size = hidden_states.size()[:2]
        weights = torch.bmm(hidden_states, self.att_weights.permute(1, 0).unsqueeze(0).repeat(batch_size, 1, 1))
        attentions = F.softmax(torch.tanh(weights.squeeze()), dim=-1)
        masked = attentions * attention_mask
        if len(attentions.shape) == 1:
            attentions = attentions.unsqueeze(0)
        _sums = masked.sum(-1, keepdim=True).expand(attentions.shape)
        attentions = masked.div(_sums)
        weighted = torch.mul(hidden_states, attentions.unsqueeze(-1).expand_as(hidden_states))
        representations = weighted.sum(1).squeeze(dim=1)
        return representations, attentions


class MultiLayerPerceptron(torch.nn.Module):
    """
    A simple MLP that can either be used independently or put on top
    of pretrained models (such as BERT) and act as a classifier.

    Args:
        hidden_size (int): the size of each layer
        num_classes (int): number of output classes
        num_layers (int): number of layers
        activation (str): type of activations for layers in between
        log_softmax (bool): whether to add a log_softmax layer before output
    """

    def __init__(self, hidden_size: int, num_classes: int, num_layers: int = 2, activation: str = 'relu', log_softmax: bool = True):
        super().__init__()
        self.layers = 0
        activations = {'relu': nn.ReLU(), 'gelu': nn.GELU(), 'sigmoid': nn.Sigmoid(), 'tanh': nn.Tanh()}
        for _ in range(num_layers - 1):
            layer = torch.nn.Linear(hidden_size, hidden_size)
            setattr(self, f'layer{self.layers}', layer)
            setattr(self, f'layer{self.layers + 1}', activations[activation])
            self.layers += 2
        layer = torch.nn.Linear(hidden_size, num_classes)
        setattr(self, f'layer{self.layers}', layer)
        self.layers += 1
        self.log_softmax = log_softmax

    @property
    def last_linear_layer(self):
        return getattr(self, f'layer{self.layers - 1}')

    def forward(self, hidden_states):
        output_states = hidden_states[:]
        for i in range(self.layers):
            output_states = getattr(self, f'layer{i}')(output_states)
        if self.log_softmax:
            output_states = torch.log_softmax(output_states, dim=-1)
        else:
            output_states = torch.softmax(output_states, dim=-1)
        return output_states


class SequenceClassifier(nn.Module):

    def __init__(self, hidden_size: int, num_classes: int, num_layers: int = 2, activation: str = 'relu', log_softmax: bool = True, dropout: float = 0.0, use_transformer_init: bool = True, pooling: str = 'mean', idx_conditioned_on: int = None):
        """
        Initializes the SequenceClassifier module.

        Args:
            hidden_size: the hidden size of the mlp head on the top of the encoder
            num_classes: number of the classes to predict
            num_layers: number of the linear layers of the mlp head on the top of the encoder
            activation: type of activations between layers of the mlp head
            log_softmax: applies the log softmax on the output
            dropout: the dropout used for the mlp head
            use_transformer_init: initializes the weights with the same approach used in Transformer
            idx_conditioned_on: index of the token to use as the sequence representation for the
                classification task, default is the first token
        """
        super().__init__()
        self.log_softmax = log_softmax
        self._idx_conditioned_on = idx_conditioned_on
        self.pooling = pooling
        self.mlp = MultiLayerPerceptron(hidden_size=hidden_size * 2 if pooling == 'mean_max' else hidden_size, num_classes=num_classes, num_layers=num_layers, activation=activation, log_softmax=log_softmax)
        self.dropout = nn.Dropout(dropout)
        if use_transformer_init:
            self.apply(lambda module: transformer_weights_init(module, xavier=False))
        if pooling == 'attention':
            self.attention = SelfAttention(hidden_size)

    def forward(self, hidden_states, attention_mask=None):
        hidden_states = self.dropout(hidden_states)
        if self.pooling == 'token':
            pooled = hidden_states[:, self._idx_conditioned_on]
        elif self.pooling == 'attention':
            pooled, _att = self.attention(hidden_states, attention_mask)
        else:
            if attention_mask is None:
                ct = hidden_states.shape[1]
            else:
                hidden_states = hidden_states * attention_mask.unsqueeze(2)
                ct = torch.sum(attention_mask, axis=1).unsqueeze(1)
            pooled_sum = torch.sum(hidden_states, axis=1)
            if self.pooling == 'mean' or self.pooling == 'mean_max':
                pooled_mean = torch.div(pooled_sum, ct)
            if self.pooling == 'max' or self.pooling == 'mean_max':
                pooled_max = torch.max(hidden_states, axis=1)[0]
            pooled = (pooled_mean if self.pooling == 'mean' else pooled_max if self.pooling == 'max' else torch.cat([pooled_mean, pooled_max], axis=-1))
        logits = self.mlp(pooled)
        return logits, pooled


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'hidden_size': 4, 'num_classes': 4}]
```
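A short usage sketch built only from the helpers above (`get_init_inputs` supplies `hidden_size=4, num_classes=4`; with the default `pooling='mean'` and no mask, dim 1 is averaged away), assuming the definitions above are in scope:

```python
args, kwargs = get_init_inputs()
model = SequenceClassifier(*args, **kwargs)

(hidden_states,) = get_inputs()        # [4, 4, 4, 4]
logits, pooled = model(hidden_states)
print(pooled.shape)                    # torch.Size([4, 4, 4]) -- mean over dim 1
print(logits.shape)                    # torch.Size([4, 4, 4]) -- log-probabilities over 4 classes
```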
triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_div_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 0.25
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp2 + tmp4
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp5 + tmp7
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tmp8 + tmp10
    tmp12 = tl_math.log(tmp11)
    tmp13 = tmp0 - tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_sum_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
        del primals_2
        buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
        del buf1
        buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_1[grid(64)](buf2, primals_3, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_3
        buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
        del primals_5
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__log_softmax_2[grid(64)](buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf5 = reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0)
        del buf3
        triton_poi_fused__log_softmax_3[grid(64)](buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del buf4
    return (buf5, buf0, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf5, primals_4, buf6)


def transformer_weights_init(module, std_init_range=0.02, xavier=True):
    """
    Initialize different weights in Transformer model.

    Args:
        module: torch.nn.Module to be initialized
        std_init_range: standard deviation of normal initializer
        xavier: if True, xavier initializer will be used in Linear layers
            as was proposed in AIAYN paper, otherwise normal initializer
            will be used (like in BERT paper)
    """
    if isinstance(module, nn.Linear):
        if xavier:
            nn.init.xavier_uniform_(module.weight)
        else:
            nn.init.normal_(module.weight, mean=0.0, std=std_init_range)
        if module.bias is not None:
            nn.init.constant_(module.bias, 0.0)
    elif isinstance(module, nn.Embedding):
        nn.init.normal_(module.weight, mean=0.0, std=std_init_range)
    elif isinstance(module, nn.LayerNorm):
        nn.init.constant_(module.weight, 1.0)
        nn.init.constant_(module.bias, 0.0)


class SelfAttention(nn.Module):

    def __init__(self, hidden_size, batch_first=True):
        super(SelfAttention, self).__init__()
        self.hidden_size = hidden_size
        self.batch_first = batch_first
        self.register_parameter('att_weights', nn.Parameter(torch.Tensor(1, hidden_size), requires_grad=True))
        nn.init.xavier_uniform_(self.att_weights.data)

    def get_mask(self):
        pass

    def forward(self, hidden_states, attention_mask=None):
        if self.batch_first:
            batch_size, _max_len = hidden_states.size()[:2]
        else:
            _max_len, batch_size = hidden_states.size()[:2]
        weights = torch.bmm(hidden_states, self.att_weights.permute(1, 0).unsqueeze(0).repeat(batch_size, 1, 1))
        attentions = F.softmax(torch.tanh(weights.squeeze()), dim=-1)
        masked = attentions * attention_mask
        if len(attentions.shape) == 1:
            attentions = attentions.unsqueeze(0)
        _sums = masked.sum(-1, keepdim=True).expand(attentions.shape)
        attentions = masked.div(_sums)
        weighted = torch.mul(hidden_states, attentions.unsqueeze(-1).expand_as(hidden_states))
        representations = weighted.sum(1).squeeze(dim=1)
        return representations, attentions


class MultiLayerPerceptron(torch.nn.Module):
    """
    A simple MLP that can either be used independently or put on top
    of pretrained models (such as BERT) and act as a classifier.

    Args:
        hidden_size (int): the size of each layer
        num_classes (int): number of output classes
        num_layers (int): number of layers
        activation (str): type of activations for layers in between
        log_softmax (bool): whether to add a log_softmax layer before output
    """

    def __init__(self, hidden_size: int, num_classes: int, num_layers: int = 2, activation: str = 'relu', log_softmax: bool = True):
        super().__init__()
        self.layers = 0
        activations = {'relu': nn.ReLU(), 'gelu': nn.GELU(), 'sigmoid': nn.Sigmoid(), 'tanh': nn.Tanh()}
        for _ in range(num_layers - 1):
            layer = torch.nn.Linear(hidden_size, hidden_size)
            setattr(self, f'layer{self.layers}', layer)
            setattr(self, f'layer{self.layers + 1}', activations[activation])
            self.layers += 2
        layer = torch.nn.Linear(hidden_size, num_classes)
        setattr(self, f'layer{self.layers}', layer)
        self.layers += 1
        self.log_softmax = log_softmax

    @property
    def last_linear_layer(self):
        return getattr(self, f'layer{self.layers - 1}')

    def forward(self, hidden_states):
        output_states = hidden_states[:]
        for i in range(self.layers):
            output_states = getattr(self, f'layer{i}')(output_states)
        if self.log_softmax:
            output_states = torch.log_softmax(output_states, dim=-1)
        else:
            output_states = torch.softmax(output_states, dim=-1)
        return output_states


class SequenceClassifierNew(nn.Module):

    def __init__(self, hidden_size: int, num_classes: int, num_layers: int = 2, activation: str = 'relu', log_softmax: bool = True, dropout: float = 0.0, use_transformer_init: bool = True, pooling: str = 'mean', idx_conditioned_on: int = None):
        """
        Initializes the SequenceClassifier module.

        Args:
            hidden_size: the hidden size of the mlp head on the top of the encoder
            num_classes: number of the classes to predict
            num_layers: number of the linear layers of the mlp head on the top of the encoder
            activation: type of activations between layers of the mlp head
            log_softmax: applies the log softmax on the output
            dropout: the dropout used for the mlp head
            use_transformer_init: initializes the weights with the same approach used in Transformer
            idx_conditioned_on: index of the token to use as the sequence representation for the
                classification task, default is the first token
        """
        super().__init__()
        self.log_softmax = log_softmax
        self._idx_conditioned_on = idx_conditioned_on
        self.pooling = pooling
        self.mlp = MultiLayerPerceptron(hidden_size=hidden_size * 2 if pooling == 'mean_max' else hidden_size, num_classes=num_classes, num_layers=num_layers, activation=activation, log_softmax=log_softmax)
        self.dropout = nn.Dropout(dropout)
        if use_transformer_init:
            self.apply(lambda module: transformer_weights_init(module, xavier=False))
        if pooling == 'attention':
            self.attention = SelfAttention(hidden_size)

    def forward(self, input_0):
        primals_2 = self.mlp.layer0.weight
        primals_3 = self.mlp.layer0.bias
        primals_4 = self.mlp.layer2.weight
        primals_5 = self.mlp.layer2.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0], output[1]
```
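A hedged equivalence check between the two versions, assuming both class definitions are in scope and a CUDA device is available (the generated `call` hard-codes device 0); the parameter copy relies on both modules registering `mlp.layer0` and `mlp.layer2` in the same order, and the tolerance may need loosening:

```python
import torch

eager = SequenceClassifier(hidden_size=4, num_classes=4).cuda()
compiled = SequenceClassifierNew(hidden_size=4, num_classes=4).cuda()

# Align the randomly initialized weights of the two modules.
with torch.no_grad():
    for dst, src in zip(compiled.parameters(), eager.parameters()):
        dst.copy_(src)

x = torch.rand(4, 4, 4, 4, device='cuda')
logits_ref, pooled_ref = eager(x)
logits_new, pooled_new = compiled(x)
print(torch.allclose(logits_ref, logits_new, atol=1e-6),
      torch.allclose(pooled_ref, pooled_new, atol=1e-6))
```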
repo_name: ngxingyu/Domain-Transfer-for-Punctuation-Retrieval
module_name: SequenceClassifier
synthetic: false
uuid: 7340
licenses: ["Apache-2.0"]
stars: 1
sha: f5aa0ea0946c68aaf7fcf49a5085e6c823766a2f
repo_link: https://github.com/ngxingyu/Domain-Transfer-for-Punctuation-Retrieval/tree/f5aa0ea0946c68aaf7fcf49a5085e6c823766a2f
entry_point: ScaleNorm
original_triton_code:

```python
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_4/inductor_cache/5i/c5iachkxsftg5m5zniolyz4aeojcm42muwsrb2a34ygecaxk5f47.py
# Topologically Sorted Source Nodes: [norm, clamp, norm_1], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div]
# Source node to ATen node mapping:
#   clamp => clamp_min
#   norm => pow_1, pow_2, sum_1
#   norm_1 => div
# Graph fragment:
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {})
#   %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
#   %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-05), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %clamp_min), kwargs = {})
triton_poi_fused_clamp_div_linalg_vector_norm_0 = async_compile.triton('triton_poi_fused_clamp_div_linalg_vector_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (0))
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
    tmp2 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp3 = tmp2 * tmp2
    tmp5 = tmp4 * tmp4
    tmp6 = tmp3 + tmp5
    tmp8 = tmp7 * tmp7
    tmp9 = tmp6 + tmp8
    tmp11 = tmp10 * tmp10
    tmp12 = tmp9 + tmp11
    tmp13 = libdevice.sqrt(tmp12)
    tmp14 = 1e-05
    tmp15 = triton_helpers.maximum(tmp13, tmp14)
    tmp16 = tmp1 / tmp15
    tl.store(out_ptr0 + (x0), tmp16, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/pq/cpqdtx3g3pyjzwakdnj54l47sm3lyvtzkr3iagg7am3bkbp6ommo.py
# Topologically Sorted Source Nodes: [norm, clamp, norm_1, mul], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div, aten.mul]
# Source node to ATen node mapping:
#   clamp => clamp_min
#   mul => mul
#   norm => pow_1, pow_2, sum_1
#   norm_1 => div
# Graph fragment:
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {})
#   %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
#   %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-05), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %clamp_min), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {})
triton_poi_fused_clamp_div_linalg_vector_norm_mul_1 = async_compile.triton('triton_poi_fused_clamp_div_linalg_vector_norm_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_linalg_vector_norm_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (), ())
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        # Topologically Sorted Source Nodes: [norm, clamp, norm_1], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div]
        stream0 = get_raw_stream(0)
        triton_poi_fused_clamp_div_linalg_vector_norm_0.run(primals_1, primals_2, buf0, 64, grid=grid(64), stream=stream0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [norm, clamp, norm_1, mul], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div, aten.mul]
        triton_poi_fused_clamp_div_linalg_vector_norm_mul_1.run(primals_2, buf0, buf1, 256, grid=grid(256), stream=stream0)
        del buf0
    return (buf1, primals_2, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
```
python_code:

```python
import math
import torch
import torch.nn as nn


class ScaleNorm(nn.Module):
    """ScaleNorm"""
    """All g’s in SCALE NORM are initialized to sqrt(d)"""

    def __init__(self, scale, eps=1e-05):
        super(ScaleNorm, self).__init__()
        self.scale = nn.Parameter(torch.tensor(math.sqrt(scale)))
        self.eps = eps

    def forward(self, x):
        norm = self.scale / torch.norm(x, dim=-1, keepdim=True).clamp(min=self.eps)
        return x * norm


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'scale': 1.0}]
```
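The forward pass computes x · g / max(‖x‖₂, eps) along the last dimension, with the learnable g initialized to √scale. A hand-checkable numeric example (values chosen so the norm is exact; nothing beyond the class above is assumed):

```python
import torch

sn = ScaleNorm(scale=4.0)        # g = sqrt(4) = 2.0
x = torch.tensor([[3.0, 4.0]])   # ||x||_2 = 5
out = sn(x)
print(out.detach())              # tensor([[1.2000, 1.6000]]) = x * 2 / 5
```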
triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
    tmp2 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tmp2 * tmp2
    tmp5 = tmp4 * tmp4
    tmp6 = tmp3 + tmp5
    tmp8 = tmp7 * tmp7
    tmp9 = tmp6 + tmp8
    tmp11 = tmp10 * tmp10
    tmp12 = tmp9 + tmp11
    tmp13 = libdevice.sqrt(tmp12)
    tmp14 = 1e-05
    tmp15 = triton_helpers.maximum(tmp13, tmp14)
    tmp16 = tmp1 / tmp15
    tl.store(out_ptr0 + x0, tmp16, xmask)


@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (), ())
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clamp_div_linalg_vector_norm_0[grid(64)](primals_1, primals_2, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_clamp_div_linalg_vector_norm_mul_1[grid(256)](primals_2, buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del buf0
    return buf1, primals_2


class ScaleNormNew(nn.Module):
    """ScaleNorm"""
    """All g’s in SCALE NORM are initialized to sqrt(d)"""

    def __init__(self, scale, eps=1e-05):
        super(ScaleNormNew, self).__init__()
        self.scale = nn.Parameter(torch.tensor(math.sqrt(scale)))
        self.eps = eps

    def forward(self, input_0):
        primals_1 = self.scale
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
```
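A quick cross-check of the compiled wrapper against the eager formula it lowers, assuming a CUDA device; `call` asserts the `(4, 4, 4, 4)` input shape:

```python
import torch

m = ScaleNormNew(scale=1.0).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
y = m(x)

# Eager reference for the two fused kernels above.
ref = x * (m.scale / torch.norm(x, dim=-1, keepdim=True).clamp(min=m.eps))
print(torch.allclose(y, ref, atol=1e-6))  # expected: True
```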
repo_name: nigelnnk/MATCh-sensitivity
module_name: ScaleNorm
synthetic: false
uuid: 7341
licenses: ["MIT"]
stars: 1
sha: aaf2b924ac98c8c5925bbf431481724d11a102f8
repo_link: https://github.com/nigelnnk/MATCh-sensitivity/tree/aaf2b924ac98c8c5925bbf431481724d11a102f8
entry_point: StyleResidual
original_triton_code:

```python
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_4/inductor_cache/nj/cnjqvzdxcui5ygocv2a5nlfxiqfekt6jgipfoplz34gwfzo2zd5f.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
#   add => add
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_4, %squeeze), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
        buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf0, (1, 4, 4), (16, 4, 1))
        buf1 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0); del buf0  # reuse
        # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_0.run(buf1, primals_4, primals_2, 16, grid=grid(16), stream=stream0)
        del primals_2
        del primals_4
    return (buf1, primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
```
python_code:

```python
import torch
from torch import nn
import torch.utils.data
import torch.optim


class StyleResidual(nn.Module):
    """Styling."""

    def __init__(self, d_channel: int, d_style: int, kernel_size: int = 1):
        super().__init__()
        self.rs = nn.Conv1d(in_channels=d_style, out_channels=d_channel, kernel_size=kernel_size, stride=1, padding=kernel_size // 2)

    def forward(self, x: torch.Tensor, s: torch.Tensor) -> torch.Tensor:
        """`x`: [B,C,T], `s`: [B,S,T] => [B,C,T]."""
        return x + self.rs(s)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'d_channel': 4, 'd_style': 4}]
```
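A usage sketch with the documented `[B, C, T]` layout (the sizes below are arbitrary illustrations). Note that `get_inputs` above instead feeds 2-D `[4, 4]` tensors, which `nn.Conv1d` accepts as unbatched `(C, T)` input in recent PyTorch:

```python
import torch

m = StyleResidual(d_channel=8, d_style=16, kernel_size=1)
x = torch.rand(2, 8, 50)    # [B, C, T]
s = torch.rand(2, 16, 50)   # [B, S, T]
print(m(x, s).shape)        # torch.Size([2, 8, 50])
```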
triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils.data
import torch.optim

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_out_ptr0 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf0, (1, 4, 4), (16, 4, 1))
        buf1 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(16)](buf1, primals_4, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_2
        del primals_4
    return buf1, primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0)


class StyleResidualNew(nn.Module):
    """Styling."""

    def __init__(self, d_channel: int, d_style: int, kernel_size: int = 1):
        super().__init__()
        self.rs = nn.Conv1d(in_channels=d_style, out_channels=d_channel, kernel_size=kernel_size, stride=1, padding=kernel_size // 2)

    def forward(self, input_0, input_1):
        primals_1 = self.rs.weight
        primals_2 = self.rs.bias
        primals_3 = input_0
        primals_4 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
```
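A matching sanity check for the compiled wrapper, assuming a CUDA device. Per `call` above, the convolution runs on `input_0` and the residual add uses `input_1`, so the eager reference is `input_1 + rs(input_0)`:

```python
import torch

m = StyleResidualNew(d_channel=4, d_style=4).cuda()
a = torch.rand(4, 4, device='cuda')
b = torch.rand(4, 4, device='cuda')
y = m(a, b)

ref = b + m.rs(a)  # unbatched Conv1d on a (C, T) input
print(torch.allclose(y, ref, atol=1e-6))  # expected: True
```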
repo_name: niklub/NeMo
module_name: StyleResidual
synthetic: false
uuid: 7342
licenses: ["Apache-2.0"]
stars: 1
sha: 4bcb2321cd16835f63afe3dfe993e6d56bcf2c0c
repo_link: https://github.com/niklub/NeMo/tree/4bcb2321cd16835f63afe3dfe993e6d56bcf2c0c
entry_point: CQLAgent
original_triton_code:

```python
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_4/inductor_cache/nq/cnqjufcqn3ur3s7xvlb2i747nyf24md4zaiatlwgkasynplfjstu.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   x => relu
# Graph fragment:
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
#   %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4096],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4096
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + (x2), tmp4, None)
    tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (64, 4), (4, 1))
    assert_size_stride(primals_2, (64, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (16, 64), (64, 1))
    assert_size_stride(primals_5, (16, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf0  # reuse
        buf3 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool)
        # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
        stream0 = get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf3, 4096, grid=grid(4096), stream=stream0)
        del primals_2
        buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
        # Topologically Sorted Source Nodes: [q], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 16), (1, 64), 0), alpha=1, beta=1, out=buf2)
        del primals_5
    return (reinterpret_tensor(buf2, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), primals_4, buf3, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((16, 64), (64, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
```
import torch
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from scipy import optimize


class CQLAgent(nn.Module):

    def __init__(self, input_shape, n_actions, n_opponent_actions,
                 hidden_dim=64):
        super(CQLAgent, self).__init__()
        self.fc1 = nn.Linear(input_shape, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, n_actions * n_opponent_actions)
        self.n_actions = n_actions
        self.n_opponent_actions = n_opponent_actions

    def forward(self, inputs):
        x = F.relu(self.fc1(inputs))
        q = self.fc2(x)
        q = q.view(-1, self.n_opponent_actions, self.n_actions)
        return q
    """
    @param:
        inputs: [batch, input_shape]
        policy_disc: whether to use the discrete policy
    @retval:
        probability distributions over the actions
        shape: [batch, n_actions]
        type: torch.Tensor
    """

    def get_policy(self, inputs, policy_disc=True):
        qvals = self.forward(inputs)
        if policy_disc:
            qvals = th.min(qvals, axis=1)[0]
            actions = th.argmax(qvals, axis=1)
            policys = F.one_hot(actions, num_classes=self.n_actions).float(
                ).detach()
        else:
            policys = []
            qvals = qvals.detach().numpy()
            for qval_sample in qvals:
                c = np.array([0] * self.n_actions + [-1])
                A_ub = np.concatenate((-qval_sample, np.ones((self.
                    n_opponent_actions, 1))), axis=1)
                B_ub = np.zeros(self.n_opponent_actions)
                A_eq = np.array([[1] * self.n_actions + [0]])
                B_eq = np.array([1])
                bounds = []
                for a in range(self.n_actions):
                    bounds.append((0, 1))
                bounds.append((None, None))
                res = optimize.linprog(c, A_ub, B_ub, A_eq, B_eq,
                    bounds=tuple(bounds))
                policy = res['x']
                policys.append(policy[:-1])
            policys = th.tensor(policys, dtype=th.float32)
        return policys


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_shape': 4, 'n_actions': 4, 'n_opponent_actions': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np import torch as th import torch.nn as nn import torch.nn.functional as F from scipy import optimize assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (16, 64), (64, 1)) assert_size_stride(primals_5, (16,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool ) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1, primals_2, buf3, 4096, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 16), (1, 64), 0 ), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (64, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), primals_4, buf3 class CQLAgentNew(nn.Module): def __init__(self, input_shape, n_actions, n_opponent_actions, hidden_dim=64): super(CQLAgentNew, self).__init__() self.fc1 = nn.Linear(input_shape, hidden_dim) self.fc2 = nn.Linear(hidden_dim, n_actions * n_opponent_actions) self.n_actions = n_actions self.n_opponent_actions = n_opponent_actions """ @param: inputs: [batch, input_shape] policy_disc: whether to use the discrete policy @ retval: problem distributions of the action shape: [batch, n_actions] type:np.array """ def get_policy(self, inputs, policy_disc=True): qvals = self.forward(inputs) if policy_disc: qvals = th.min(qvals, axis=1)[0] actions = th.argmax(qvals, axis=1) policys = F.one_hot(actions, num_classes=self.n_actions).float( ).detach() else: policys = [] qvals = qvals.detach().numpy() for qval_sample in qvals: c = np.array([0] * self.n_actions + [-1]) A_ub = np.concatenate((-qval_sample, np.ones((self. 
n_opponent_actions, 1))), axis=1) B_ub = np.zeros(self.n_opponent_actions) A_eq = np.array([[1] * self.n_actions + [0]]) B_eq = np.array([1]) bounds = [] for a in range(self.n_actions): bounds.append((0, 1)) bounds.append((None, None)) res = optimize.linprog(c, A_ub, B_ub, A_eq, B_eq, bounds= tuple(bounds)) policy = res['x'] policys.append(policy[:-1]) policys = th.tensor(policys, dtype=th.float32) return policys def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
netlab-lcy/CMIX
CQLAgent
false
7,343
[ "MIT" ]
1
53e2d8794af2b380295efe06dcb05235089953c1
https://github.com/netlab-lcy/CMIX/tree/53e2d8794af2b380295efe06dcb05235089953c1
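The `policy_disc=False` branch of `CQLAgent.get_policy` above solves a maximin linear program: maximize v subject to sum_i pi_i * Q[j, i] >= v for every opponent action j, with pi a probability vector over own actions. The standalone sketch below reproduces that LP construction for a hypothetical 2x2 Q-matrix; the matrix values and variable names are illustrative, not taken from the repo.

import numpy as np
from scipy import optimize

# Hypothetical payoff matrix: rows index opponent actions, columns own actions.
qvals = np.array([[1.0, 3.0],
                  [4.0, 0.0]])
n_opp, n_act = qvals.shape

# Decision variables are [pi_1, ..., pi_n, v]; linprog minimizes, so c picks -v.
c = np.array([0.0] * n_act + [-1.0])
# One inequality per opponent action j: v - sum_i pi_i * Q[j, i] <= 0.
A_ub = np.concatenate((-qvals, np.ones((n_opp, 1))), axis=1)
b_ub = np.zeros(n_opp)
# The strategy must be a probability distribution.
A_eq = np.array([[1.0] * n_act + [0.0]])
b_eq = np.array([1.0])
bounds = [(0, 1)] * n_act + [(None, None)]

res = optimize.linprog(c, A_ub, b_ub, A_eq, b_eq, bounds=bounds)
print(res.x[:-1])  # maximin mixed strategy, here [0.5, 0.5]
print(res.x[-1])   # guaranteed expected value, here 2.0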
MultiHeadAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/u7/cu7vpvizjd4aqykmop7o4bb3wko4x255a6cbzzzpnk3hxneiqhcw.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = (xindex // 128) % 16 x2 = (xindex // 2048) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (128*x2) + (512*x1)), None) tmp1 = tl.load(in_ptr1 
+ (x0 + (128*x2)), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x3), tmp2, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/hz/chz2sqsqk26mwhf2dxhgh44jfpu2er5yqjftwkzfav5ctqtx5e7f.py # Topologically Sorted Source Nodes: [attn_distribution], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_distribution => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [2], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py # Topologically Sorted Source Nodes: [attn_distribution], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_distribution => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) 
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/7q/c7q5bvn7twuxhcrmxt4henfecjupsuo5yrwvo2hhumobsmaauisq.py # Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat] # Source node to ATen node mapping: # combined => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view_9, %primals_2], 2), kwargs = {}) triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 
'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 516 x3 = (xindex // 516) x2 = (xindex // 2064) x4 = xindex % 2064 tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 512, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((128*x3) + (2048*((x0 // 128) % 4)) + (x0 % 128)), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 516, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x3) + ((-512) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x4 + (2080*x2)), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/wf/cwfbwzeyrr36zca5qiiebhnejmrgzdsjwmzyit7xy4nubbdp6hnq.py # Topologically Sorted Source Nodes: [combined, view_6], Original ATen: [aten.cat, aten.view] # Source node to ATen node mapping: # combined => cat # view_6 => view_10 # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view_9, %primals_2], 2), kwargs = {}) # %view_10 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%cat, [-1, 8]), kwargs = {}) triton_poi_fused_cat_view_4 = async_compile.triton('triton_poi_fused_cat_view_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_view_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_view_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 
+ ((2080*(x0 // 2064)) + (x0 % 2064)), xmask) tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ag/cagrpzpgi7zvecwkentweyxoalabxs5bqjnsvvbboefkenvqapba.py # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh, aten.tanh_backward] # Source node to ATen node mapping: # tanh => tanh # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_8), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_tensor,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, %tanh), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul), kwargs = {}) triton_poi_fused_tanh_tanh_backward_5 = async_compile.triton('triton_poi_fused_tanh_tanh_backward_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_tanh_backward_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_tanh_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tmp4 = tmp3 * tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp4 tl.store(in_out_ptr0 + (x2), tmp3, xmask) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (512, 4), (4, 1)) assert_size_stride(primals_4, (512, ), (1, )) assert_size_stride(primals_5, (512, 4), (4, 1)) assert_size_stride(primals_6, (512, ), (1, )) assert_size_stride(primals_7, (4, 8), (8, 1)) assert_size_stride(primals_8, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = 
empty_strided_cuda((16, 512), (512, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 512), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((16, 512), (512, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 512), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(buf0, primals_4, buf2, 8192, grid=grid(8192), stream=stream0) del primals_4 buf3 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone] triton_poi_fused_clone_0.run(buf1, primals_6, buf3, 8192, grid=grid(8192), stream=stream0) del primals_6 buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_score], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 128), (512, 128, 1), 0), reinterpret_tensor(buf3, (16, 128, 4), (512, 1, 128), 0), out=buf4) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [attn_distribution], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0) buf6 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [attn_distribution], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0) del buf5 buf7 = reinterpret_tensor(buf1, (16, 4, 128), (512, 128, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [bmm_1], Original ATen: [aten.bmm] extern_kernels.bmm(buf6, reinterpret_tensor(buf3, (16, 4, 128), (512, 128, 1), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 516), (2080, 516, 1), torch.float32) # Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat] triton_poi_fused_cat_3.run(buf7, primals_2, buf8, 8256, grid=grid(8256), stream=stream0) del buf7 buf9 = empty_strided_cuda((1032, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [combined, view_6], Original ATen: [aten.cat, aten.view] triton_poi_fused_cat_view_4.run(buf8, buf9, 8256, grid=grid(8256), stream=stream0) del buf8 buf10 = empty_strided_cuda((1032, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf9, reinterpret_tensor(primals_7, (8, 4), (1, 8), 0), out=buf10) buf11 = buf10; del buf10 # reuse buf12 = empty_strided_cuda((1032, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh, aten.tanh_backward] triton_poi_fused_tanh_tanh_backward_5.run(buf11, primals_8, buf12, 4128, grid=grid(4128), stream=stream0) del primals_8 return (reinterpret_tensor(buf11, (4, 258, 4), (1032, 4, 1), 0), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(buf3, (16, 128, 4), (512, 1, 128), 0), buf6, buf9, buf12, primals_7, reinterpret_tensor(buf2, (16, 128, 4), (512, 1, 128), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = 
rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((512, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((512, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F


class MultiHeadAttention(nn.Module):
    """
    Applies a multi-head attention mechanism on the output features from the decoder.

    Refer to 「State-of-the-art Speech Recognition With Sequence-to-Sequence Models」
    Paper: https://arxiv.org/abs/1712.01769

    Args:
        in_features (int): The number of expected features in the output
        n_head (int): number of heads. (default: 4)
        dim (int): dimension size of sub heads. (default: 128)

    Inputs: query, key
        - **query** (batch, output_len, dimensions): tensor containing the output features from the decoder.
        - **key** (batch, input_len, dimensions): tensor containing features of the encoded input sequence.

    Returns: output
        - **output** (batch, output_len, dimensions): tensor containing the attended output features from the decoder.

    Examples::

        >>> attention = MultiHeadAttention(in_features, n_head=4, dim=128)
        >>> output = attention(query, key)
    """

    def __init__(self, in_features, n_head=4, dim=128):
        super(MultiHeadAttention, self).__init__()
        self.in_features = in_features
        self.linear_q = nn.Linear(in_features, dim * n_head)
        self.linear_k = nn.Linear(in_features, dim * n_head)
        self.n_head = n_head
        self.dim = dim
        self.out = nn.Linear(in_features << 1, in_features)

    def forward(self, query, key):
        batch_size = key.size(0)
        query_length = query.size(1)
        key_length = key.size(1)
        preserved = query
        query = self.linear_q(query).view(batch_size, query_length, self.
            n_head, self.dim).permute(2, 0, 1, 3)
        key = self.linear_k(key).view(batch_size, key_length, self.n_head,
            self.dim).permute(2, 0, 1, 3)
        query = query.contiguous().view(-1, query_length, self.dim)
        key = key.contiguous().view(-1, key_length, self.dim)
        attn_score = torch.bmm(query, key.transpose(1, 2))
        attn_distribution = F.softmax(attn_score, dim=2)
        context = torch.bmm(attn_distribution, key).view(self.n_head,
            batch_size, query_length, self.dim)
        context = context.permute(1, 2, 0, 3).contiguous().view(batch_size,
            query_length, -1)
        combined = torch.cat([context, preserved], dim=2)
        output = torch.tanh(self.out(combined.view(-1, 2 * self.in_features))
            ).view(batch_size, -1, self.in_features)
        return output


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 % 16 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x2 + 512 * x1), None) tmp1 = tl.load(in_ptr1 + (x0 + 128 * x2), None, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 8256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 516 x3 = xindex // 516 x2 = xindex // 2064 x4 = xindex % 2064 tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 512, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (128 * x3 + 2048 * (x0 // 128 % 4) + x0 % 128), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 516, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x3 + (-512 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x4 + 2080 * x2), tmp10, xmask) @triton.jit def triton_poi_fused_cat_view_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 8256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (2080 * (x0 // 2064) + x0 % 2064), xmask) tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_tanh_tanh_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tmp4 = tmp3 * tmp3 tmp5 = 1.0 tmp6 = tmp5 - tmp4 tl.store(in_out_ptr0 + x2, tmp3, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (512, 4), (4, 1)) assert_size_stride(primals_4, (512,), (1,)) assert_size_stride(primals_5, (512, 4), (4, 1)) assert_size_stride(primals_6, (512,), (1,)) assert_size_stride(primals_7, (4, 8), (8, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 512), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((16, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 512), (1, 4), 0), out=buf1) del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(8192)](buf0, primals_4, buf2, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 buf3 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 triton_poi_fused_clone_0[grid(8192)](buf1, primals_6, buf3, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 128), (512, 128, 1), 0), reinterpret_tensor(buf3, (16, 128, 4), (512, 1, 128), 0 ), out=buf4) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 buf7 = reinterpret_tensor(buf1, (16, 4, 128), (512, 128, 1), 0) del buf1 extern_kernels.bmm(buf6, reinterpret_tensor(buf3, (16, 4, 128), 
( 512, 128, 1), 0), out=buf7) buf8 = empty_strided_cuda((4, 4, 516), (2080, 516, 1), torch.float32) triton_poi_fused_cat_3[grid(8256)](buf7, primals_2, buf8, 8256, XBLOCK=256, num_warps=4, num_stages=1) del buf7 buf9 = empty_strided_cuda((1032, 8), (8, 1), torch.float32) triton_poi_fused_cat_view_4[grid(8256)](buf8, buf9, 8256, XBLOCK= 256, num_warps=4, num_stages=1) del buf8 buf10 = empty_strided_cuda((1032, 4), (4, 1), torch.float32) extern_kernels.mm(buf9, reinterpret_tensor(primals_7, (8, 4), (1, 8 ), 0), out=buf10) buf11 = buf10 del buf10 buf12 = empty_strided_cuda((1032, 4), (4, 1), torch.float32) triton_poi_fused_tanh_tanh_backward_5[grid(4128)](buf11, primals_8, buf12, 4128, XBLOCK=128, num_warps=4, num_stages=1) del primals_8 return reinterpret_tensor(buf11, (4, 258, 4), (1032, 4, 1), 0 ), reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf3, (16, 128, 4), (512, 1, 128), 0 ), buf6, buf9, buf12, primals_7, reinterpret_tensor(buf2, (16, 128, 4), (512, 1, 128), 0) class MultiHeadAttentionNew(nn.Module): """ Applies an multi-head attention mechanism on the output features from the decoder. Refer to 「State-of-the-art Speech Recognition With Sequence-to-Sequence Models」 Paper https://arxiv.org/abs/1712.01769 Args: in_features (int): The number of expected features in the output n_head (int): number of heads. (default: 4) dim (int): dimension size of sub heads. (default: 128) Inputs: query, key - **query** (batch, output_len, dimensions): tensor containing the output features from the decoder. - **key** (batch, input_len, dimensions): tensor containing features of the encoded input sequence. Returns: output - **output** (batch, output_len, dimensions): tensor containing the attended output features from the decoder. Examples:: >>> attention = MultiHeadAttention(in_features, n_head=4, dim=128) >>> output = attention(query, key) """ def __init__(self, in_features, n_head=4, dim=128): super(MultiHeadAttentionNew, self).__init__() self.in_features = in_features self.linear_q = nn.Linear(in_features, dim * n_head) self.linear_k = nn.Linear(in_features, dim * n_head) self.n_head = n_head self.dim = dim self.out = nn.Linear(in_features << 1, in_features) def forward(self, input_0, input_1): primals_3 = self.linear_q.weight primals_4 = self.linear_q.bias primals_5 = self.linear_k.weight primals_6 = self.linear_k.bias primals_7 = self.out.weight primals_8 = self.out.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
ngbsLab/Korean-Speech-Recognition
MultiHeadAttention
false
7,344
[ "Apache-2.0" ]
1
3867bf7d23222da6812c9b98a93d3c6f7b3c80fc
https://github.com/ngbsLab/Korean-Speech-Recognition/tree/3867bf7d23222da6812c9b98a93d3c6f7b3c80fc
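A detail worth noting in `MultiHeadAttention.forward`: the context produced by the attention heads has width `n_head * dim` (512 by default), while `combined.view(-1, 2 * self.in_features)` assumes width `2 * in_features`. With the `in_features=4` configuration from `get_init_inputs()`, the view therefore reinterprets the 516 features per time step, and the output length becomes 258 rather than `query_length` — consistent with the `(4, 258, 4)` tensor returned by the compiled `call()` above. A minimal check, assuming the class as defined in this record:

import torch

attn = MultiHeadAttention(in_features=4)  # n_head=4, dim=128 by default
query, key = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
print(attn(query, key).shape)  # torch.Size([4, 258, 4]), not (4, 4, 4)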
FocalLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py # Topologically Sorted Source Nodes: [logp], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # logp => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = 
tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/jw/cjwpmepxdrhjkf3qqr4e6qwmehd4cbfk26molvzkvgaoyj3su3bt.py # Topologically Sorted Source Nodes: [logp, neg, p, sub, pow_1, loss, mean], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div, aten.exp, aten.rsub, aten.pow, aten.mean] # Source node to ATen node mapping: # logp => div, exp, log, mul, neg, sub_1, sum_1, sum_2 # loss => mul_1 # mean => mean # neg => neg_1 # p => exp_1 # pow_1 => pow_1 # sub => sub_2 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg0_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Scalar](args = (%neg, 64), kwargs = {}) # %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%div,), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_1,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %exp_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %div), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_1,), kwargs = {}) triton_per_fused__log_softmax_div_exp_mean_mul_neg_pow_rsub_sum_1 = async_compile.triton('triton_per_fused__log_softmax_div_exp_mean_mul_neg_pow_rsub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, 
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_exp_mean_mul_neg_pow_rsub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__log_softmax_div_exp_mean_mul_neg_pow_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = (rindex // 64) tmp0 = tl.load(in_ptr0 + (r3), None) tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr1 + (r3), None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tmp20 = 0.015625 tmp21 = tmp19 * tmp20 tmp22 = -tmp21 tmp23 = tl_math.exp(tmp22) tmp24 = 1.0 tmp25 = tmp24 - tmp23 tmp26 = tmp24 * tmp21 tmp27 = tmp26 / tmp24 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp27, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [logp], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [logp, neg, p, sub, pow_1, loss, mean], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div, aten.exp, aten.rsub, aten.pow, aten.mean] triton_per_fused__log_softmax_div_exp_mean_mul_neg_pow_rsub_sum_1.run(buf2, buf0, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del buf0 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ 
== "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.optim
import torch.utils.data
import torch.autograd


class FocalLoss(nn.Module):

    def __init__(self, gamma=0, eps=1e-07):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.eps = eps  # stored but never referenced in forward()
        self.ce = torch.nn.CrossEntropyLoss()

    def forward(self, input, target):
        logp = self.ce(input, target)
        p = torch.exp(-logp)
        loss = (1 - p) ** self.gamma * logp
        return loss.mean()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.optim import torch.utils.data import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_div_exp_mean_mul_neg_pow_rsub_sum_1( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + r3, None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tmp20 = 0.015625 tmp21 = tmp19 * tmp20 tmp22 = -tmp21 tmp23 = tl_math.exp(tmp22) tmp24 = 1.0 tmp24 - tmp23 tmp26 = tmp24 * tmp21 tmp27 = tmp26 / tmp24 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp27, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1 del buf1 triton_per_fused__log_softmax_div_exp_mean_mul_neg_pow_rsub_sum_1[grid (1)](buf2, buf0, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del buf0 return buf2, class FocalLossNew(nn.Module): def __init__(self, gamma=0, eps=1e-07): super(FocalLossNew, self).__init__() self.gamma = gamma self.eps = eps self.ce = torch.nn.CrossEntropyLoss() def forward(self, 
input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
nikitajz/google-landmarks
FocalLoss
false
7,345
[ "MIT" ]
1
2051462be4450c193c98b237fc7ebdae783e2b28
https://github.com/nikitajz/google-landmarks/tree/2051462be4450c193c98b237fc7ebdae783e2b28
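A minimal plain-PyTorch sketch of the focal-loss formula the fused kernels above compile, assuming dense targets with the class dimension at dim=1. focal_loss_reference is an illustrative helper, not part of the repo; with the module's default gamma=0 it reduces to plain cross-entropy, and no claim is made that its reduction constants match the compiled kernel bit-for-bit.

import torch
import torch.nn.functional as F


def focal_loss_reference(logits, target, gamma=0.0):
    # Cross-entropy against a dense target, then modulated by (1 - p)**gamma
    # with p = exp(-CE), which is the standard focal-loss construction.
    logp = F.log_softmax(logits, dim=1)
    ce = -(target * logp).sum(dim=1)
    p = torch.exp(-ce)
    return ((1.0 - p) ** gamma * ce).mean()


if __name__ == '__main__':
    logits = torch.rand(4, 4, 4, 4)
    target = torch.rand(4, 4, 4, 4)
    print(focal_loss_reference(logits, target))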
ClassWisePool
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/wj/cwj6mseepwwsl64zrhfvnzd4gkux6dcys7drtkhi2dsgv2axk6do.py # Topologically Sorted Source Nodes: [output, truediv], Original ATen: [aten.sum, aten.div] # Source node to ATen node mapping: # output => sum_1 # truediv => div # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view, [2]), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 4), kwargs = {}) triton_poi_fused_div_sum_0 = async_compile.triton('triton_poi_fused_div_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) 
tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [output, truediv], Original ATen: [aten.sum, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_sum_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import sys
from torch.autograd import Function
import torch
from torch import nn


class ClassWisePoolFunction(Function):

    @staticmethod
    def forward(ctx, input, args):
        ctx.num_maps = args
        batch_size, num_channels, h, w = input.size()
        if num_channels % ctx.num_maps != 0:
            print('ClassWisePool: num_channels must be a multiple of num_maps')
            sys.exit(-1)
        num_outputs = int(num_channels / ctx.num_maps)
        x = input.view(batch_size, num_outputs, ctx.num_maps, h, w)
        output = torch.sum(x, 2)
        ctx.save_for_backward(input)
        return output.view(batch_size, num_outputs, h, w) / ctx.num_maps

    @staticmethod
    def backward(ctx, grad_output):
        input, = ctx.saved_tensors
        batch_size, num_channels, h, w = input.size()
        num_outputs = grad_output.size(1)
        grad_output = grad_output.view(batch_size, num_outputs, 1, h, w)
        grad_input = grad_output.expand(batch_size, num_outputs,
            ctx.num_maps, h, w).contiguous()
        return grad_input.view(batch_size, num_channels, h, w), None


class ClassWisePool(nn.Module):

    def __init__(self, num_maps):
        super(ClassWisePool, self).__init__()
        self.num_maps = num_maps

    def forward(self, input):
        return ClassWisePoolFunction.apply(input, self.num_maps)

    def __repr__(self):
        return self.__class__.__name__ + ' (num_maps={num_maps})'.format(
            num_maps=self.num_maps)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_maps': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import sys
from torch.autograd import Function
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_div_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 0.25
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_sum_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del arg0_1
    return buf0,


class ClassWisePoolFunction(Function):

    @staticmethod
    def forward(ctx, input, args):
        ctx.num_maps = args
        batch_size, num_channels, h, w = input.size()
        if num_channels % ctx.num_maps != 0:
            print('ClassWisePool: num_channels must be a multiple of num_maps')
            sys.exit(-1)
        num_outputs = int(num_channels / ctx.num_maps)
        x = input.view(batch_size, num_outputs, ctx.num_maps, h, w)
        output = torch.sum(x, 2)
        ctx.save_for_backward(input)
        return output.view(batch_size, num_outputs, h, w) / ctx.num_maps

    @staticmethod
    def backward(ctx, grad_output):
        input, = ctx.saved_tensors
        batch_size, num_channels, h, w = input.size()
        num_outputs = grad_output.size(1)
        grad_output = grad_output.view(batch_size, num_outputs, 1, h, w)
        grad_input = grad_output.expand(batch_size, num_outputs,
            ctx.num_maps, h, w).contiguous()
        return grad_input.view(batch_size, num_channels, h, w), None


class ClassWisePoolNew(nn.Module):

    def __init__(self, num_maps):
        super(ClassWisePoolNew, self).__init__()
        self.num_maps = num_maps

    def __repr__(self):
        return self.__class__.__name__ + ' (num_maps={num_maps})'.format(
            num_maps=self.num_maps)

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
nishanthta/wsl
ClassWisePool
false
7,346
[ "MIT" ]
1
5fda3b909a314b7f88ffa9ab27a6a142de6b0159
https://github.com/nishanthta/wsl/tree/5fda3b909a314b7f88ffa9ab27a6a142de6b0159
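Because the forward pass divides the group-wise sum by num_maps, class-wise pooling is simply a mean over blocks of num_maps consecutive channels. A quick sketch of that equivalence with illustrative shapes (8 channels, num_maps=4, so two output maps):

import torch

x = torch.rand(4, 8, 4, 4)
num_maps = 4
num_outputs = x.size(1) // num_maps
# view splits the channels into (num_outputs, num_maps) groups; the mean over
# the group dim equals sum / num_maps, i.e. what ClassWisePool(num_maps)(x) returns.
ref = x.view(4, num_outputs, num_maps, 4, 4).mean(dim=2)
print(ref.shape)  # torch.Size([4, 2, 4, 4])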
WassersteinLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ug/cugldwv35g53yxideoh7o6qqdvauiciej3welxrwa4qfta7lmlax.py # Topologically Sorted Source Nodes: [sum_1, add, tensor_a, cdf_tensor_a], Original ATen: [aten.sum, aten.add, aten.div, aten.cumsum] # Source node to ATen node mapping: # add => add # cdf_tensor_a => cumsum # sum_1 => sum_1 # tensor_a => div # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [-1], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-14), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %add), kwargs = {}) # %cumsum : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%div, -1), kwargs = {}) triton_per_fused_add_cumsum_div_sum_0 = async_compile.triton('triton_per_fused_add_cumsum_div_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton_heuristics.persistent_reduction( size_hints=[64, 4], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_cumsum_div_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': 
False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_cumsum_div_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 64 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (4*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 1e-14 tmp9 = tmp7 + tmp8 tmp10 = tmp0 / tmp9 tmp11 = tmp10.to(tl.float32) tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp13, = tl.associative_scan((tmp12,), 1, _triton_helper_fn_add0) tl.store(out_ptr0 + (r1 + (4*x0)), tmp13, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/o7/co7to4ycybqjfm36u63qemdpnbojs735ow643af46n6zxahrimbt.py # Topologically Sorted Source Nodes: [sub, abs_1, cdf_distance, cdf_loss], Original ATen: [aten.sub, aten.abs, aten.sum, aten.mean] # Source node to ATen node mapping: # abs_1 => abs_1 # cdf_distance => sum_3 # cdf_loss => mean # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%cumsum, %cumsum_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%abs_1, [-1]), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_3,), kwargs = {}) triton_per_fused_abs_mean_sub_sum_1 = async_compile.triton('triton_per_fused_abs_mean_sub_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_mean_sub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_mean_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp6 = tmp4 - tmp5 tmp7 = tl_math.abs(tmp6) tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tl_math.abs(tmp11) tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tl_math.abs(tmp16) tmp18 = tmp13 + tmp17 tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp21 = tl.sum(tmp19, 1)[:, None] tmp22 = 64.0 tmp23 = tmp21 / tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp23, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sum_1, add, tensor_a, cdf_tensor_a], Original ATen: [aten.sum, aten.add, aten.div, aten.cumsum] stream0 = get_raw_stream(0) triton_per_fused_add_cumsum_div_sum_0.run(arg0_1, buf0, 64, 4, grid=grid(64), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sum_2, add_1, tensor_b, cdf_tensor_b], Original ATen: [aten.sum, aten.add, aten.div, aten.cumsum] triton_per_fused_add_cumsum_div_sum_0.run(arg1_1, buf1, 64, 4, grid=grid(64), stream=stream0) del arg1_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [sub, abs_1, cdf_distance, cdf_loss], Original ATen: [aten.sub, aten.abs, aten.sum, aten.mean] triton_per_fused_abs_mean_sub_sum_1.run(buf3, buf0, buf1, 1, 64, grid=grid(1), stream=stream0) del buf0 del buf1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch def torch_cdf_loss(tensor_a, tensor_b, p=1): tensor_a = tensor_a / (torch.sum(tensor_a, dim=-1, keepdim=True) + 1e-14) tensor_b = tensor_b / (torch.sum(tensor_b, dim=-1, keepdim=True) + 1e-14) cdf_tensor_a = torch.cumsum(tensor_a, dim=-1) cdf_tensor_b = torch.cumsum(tensor_b, dim=-1) if p == 1: cdf_distance = torch.sum(torch.abs(cdf_tensor_a - cdf_tensor_b), dim=-1 ) elif p == 2: cdf_distance = torch.sqrt(torch.sum(torch.pow(cdf_tensor_a - cdf_tensor_b, 2), dim=-1)) else: cdf_distance = torch.pow(torch.sum(torch.pow(torch.abs(cdf_tensor_a - cdf_tensor_b), p), dim=-1), 1 / p) cdf_loss = cdf_distance.mean() return cdf_loss def torch_wasserstein_loss(tensor_a, tensor_b): return torch_cdf_loss(tensor_a, tensor_b, p=1) class WassersteinLoss(torch.nn.Module): def __init__(self): super().__init__() def forward(self, tensor_a, tensor_b): return torch_wasserstein_loss(tensor_a, tensor_b) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton.jit def triton_per_fused_add_cumsum_div_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 1e-14 tmp9 = tmp7 + tmp8 tmp10 = tmp0 / tmp9 tmp11 = tmp10.to(tl.float32) tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp13, = tl.associative_scan((tmp12,), 1, _triton_helper_fn_add0) tl.store(out_ptr0 + (r1 + 4 * x0), tmp13, xmask) @triton.jit def triton_per_fused_abs_mean_sub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp6 = tmp4 - tmp5 tmp7 = tl_math.abs(tmp6) tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tl_math.abs(tmp11) tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tl_math.abs(tmp16) tmp18 = tmp13 + tmp17 tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp21 = tl.sum(tmp19, 1)[:, None] tmp22 = 64.0 tmp23 = tmp21 / tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp23, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_add_cumsum_div_sum_0[grid(64)](arg0_1, buf0, 64, 4, XBLOCK=8, num_warps=2, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_per_fused_add_cumsum_div_sum_0[grid(64)](arg1_1, buf1, 64, 4, XBLOCK=8, num_warps=2, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((), (), 
torch.float32) buf3 = buf2 del buf2 triton_per_fused_abs_mean_sub_sum_1[grid(1)](buf3, buf0, buf1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 return buf3, def torch_cdf_loss(tensor_a, tensor_b, p=1): tensor_a = tensor_a / (torch.sum(tensor_a, dim=-1, keepdim=True) + 1e-14) tensor_b = tensor_b / (torch.sum(tensor_b, dim=-1, keepdim=True) + 1e-14) cdf_tensor_a = torch.cumsum(tensor_a, dim=-1) cdf_tensor_b = torch.cumsum(tensor_b, dim=-1) if p == 1: cdf_distance = torch.sum(torch.abs(cdf_tensor_a - cdf_tensor_b), dim=-1 ) elif p == 2: cdf_distance = torch.sqrt(torch.sum(torch.pow(cdf_tensor_a - cdf_tensor_b, 2), dim=-1)) else: cdf_distance = torch.pow(torch.sum(torch.pow(torch.abs(cdf_tensor_a - cdf_tensor_b), p), dim=-1), 1 / p) cdf_loss = cdf_distance.mean() return cdf_loss def torch_wasserstein_loss(tensor_a, tensor_b): return torch_cdf_loss(tensor_a, tensor_b, p=1) class WassersteinLossNew(torch.nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
nikitadhawan/SimCLR
WassersteinLoss
false
7,347
[ "MIT" ]
1
7d87b384b1edb68e7ba86601b26f76e6da214718
https://github.com/nikitadhawan/SimCLR/tree/7d87b384b1edb68e7ba86601b26f76e6da214718
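A toy check of the CDF construction used above, with hand-picked point masses: summing |CDF_a - CDF_b| along the last dim recovers the bin distance, which is exactly the 1-D Wasserstein-1 value that torch_cdf_loss(p=1) computes.

import torch

# Point masses at bin 0 and bin 2 of a 4-bin histogram.
a = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
b = torch.tensor([[0.0, 0.0, 1.0, 0.0]])
cdf_a = (a / (a.sum(-1, keepdim=True) + 1e-14)).cumsum(-1)  # [1, 1, 1, 1]
cdf_b = (b / (b.sum(-1, keepdim=True) + 1e-14)).cumsum(-1)  # [0, 0, 1, 1]
print((cdf_a - cdf_b).abs().sum(-1).mean())  # tensor(2.): masses 2 bins apart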
CoxPHLossSorted
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/5s/c5shsgehfk5krlitzfx6dsoni2ot3vtyhzwhw5d6md32oxwp3feg.py # Topologically Sorted Source Nodes: [gamma, sub, exp, cumsum, add, log, log_cumsum_h, sub_1, mul, sum_1, sum_2, div, neg], Original ATen: [aten.max, aten.sub, aten.exp, aten.cumsum, aten.add, aten.log, aten.mul, aten.sum, aten.div, aten.neg] # Source node to ATen node mapping: # add => add # cumsum => cumsum # div => div # exp => exp # gamma => max_1 # log => log # log_cumsum_h => add_1 # mul => mul # neg => neg # sub => sub # sub_1 => sub_1 # sum_1 => sum_1 # sum_2 => sum_2 # Graph fragment: # %max_1 : [num_users=2] = call_function[target=torch.ops.aten.max.default](args = (%view_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %max_1), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %cumsum : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%exp, 0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%cumsum, 1e-07), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log, %max_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %add_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %view), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%div,), kwargs = {}) triton_per_fused_add_cumsum_div_exp_log_max_mul_neg_sub_sum_0 = async_compile.triton('triton_per_fused_add_cumsum_div_exp_log_max_mul_neg_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from 
torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_cumsum_div_exp_log_max_mul_neg_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_cumsum_div_exp_log_max_mul_neg_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp14 = tl.load(in_ptr1 + (r0), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp1, 0)) tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp5.to(tl.float32) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp8, = tl.associative_scan((tmp7,), 0, _triton_helper_fn_add0) tmp9 = 1e-07 tmp10 = tmp8 + tmp9 tmp11 = tl_math.log(tmp10) tmp12 = tmp11 + tmp3 tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = tl.broadcast_to(tmp14, [RBLOCK]) tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0)) tmp22 = tmp18 / tmp21 tmp23 = -tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp23, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((), (), torch.float32) buf4 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [gamma, sub, exp, cumsum, add, log, log_cumsum_h, sub_1, mul, sum_1, sum_2, div, neg], Original ATen: [aten.max, aten.sub, aten.exp, aten.cumsum, aten.add, aten.log, aten.mul, aten.sum, aten.div, aten.neg] stream0 = get_raw_stream(0) triton_per_fused_add_cumsum_div_exp_log_max_mul_neg_sub_sum_0.run(buf4, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf4, ) def benchmark_compiled_module(times=10, 
repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch


def cox_ph_loss_sorted(log_h, event, eps=1e-07):
    """Requires the input to be sorted by descending duration time.
    See DatasetDurationSorted.

    We calculate the negative log of $(\\frac{h_i}{\\sum_{j \\in R_i} h_j})^d$,
    where h = exp(log_h) are the hazards and R is the risk set, and d is event.

    We just compute a cumulative sum, and not the true Risk sets. This is a
    limitation, but simple and fast.
    """
    event = event.view(-1)
    log_h = log_h.view(-1)
    gamma = log_h.max()
    log_cumsum_h = log_h.sub(gamma).exp().cumsum(0).add(eps).log().add(gamma)
    return -log_h.sub(log_cumsum_h).mul(event).sum().div(event.sum())


class CoxPHLossSorted(torch.nn.Module):
    """Loss for CoxPH.
    Requires the input to be sorted by descending duration time.
    See DatasetDurationSorted.

    We calculate the negative log of $(\\frac{h_i}{\\sum_{j \\in R_i} h_j})^d$,
    where h = exp(log_h) are the hazards and R is the risk set, and d is event.

    We just compute a cumulative sum, and not the true Risk sets. This is a
    limitation, but simple and fast.
    """

    def __init__(self):
        super().__init__()

    def forward(self, log_h, events):
        return cox_ph_loss_sorted(log_h, events)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
    tmp0 = arg0_0 + arg1_0
    return tmp0


@triton.jit
def triton_per_fused_add_cumsum_div_exp_log_max_mul_neg_sub_sum_0(in_out_ptr0,
    in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp14 = tl.load(in_ptr1 + r0, None)
    tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
    tmp3 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp1, 0))
    tmp4 = tmp0 - tmp3
    tmp5 = tl_math.exp(tmp4)
    tmp6 = tmp5.to(tl.float32)
    tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
    tmp8, = tl.associative_scan((tmp7,), 0, _triton_helper_fn_add0)
    tmp9 = 1e-07
    tmp10 = tmp8 + tmp9
    tmp11 = tl_math.log(tmp10)
    tmp12 = tmp11 + tmp3
    tmp13 = tmp0 - tmp12
    tmp15 = tmp13 * tmp14
    tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
    tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
    tmp19 = tl.broadcast_to(tmp14, [RBLOCK])
    tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0))
    tmp22 = tmp18 / tmp21
    tmp23 = -tmp22
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf2 = empty_strided_cuda((), (), torch.float32)
        buf4 = buf2
        del buf2
        get_raw_stream(0)
        triton_per_fused_add_cumsum_div_exp_log_max_mul_neg_sub_sum_0[grid(1)](
            buf4, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf4,


def cox_ph_loss_sorted(log_h, event, eps=1e-07):
    """Requires the input to be sorted by descending duration time.
    See DatasetDurationSorted.

    We calculate the negative log of $(\\frac{h_i}{\\sum_{j \\in R_i} h_j})^d$,
    where h = exp(log_h) are the hazards and R is the risk set, and d is event.

    We just compute a cumulative sum, and not the true Risk sets. This is a
    limitation, but simple and fast.
    """
    event = event.view(-1)
    log_h = log_h.view(-1)
    gamma = log_h.max()
    log_cumsum_h = log_h.sub(gamma).exp().cumsum(0).add(eps).log().add(gamma)
    return -log_h.sub(log_cumsum_h).mul(event).sum().div(event.sum())


class CoxPHLossSortedNew(torch.nn.Module):
    """Loss for CoxPH.
    Requires the input to be sorted by descending duration time.
    See DatasetDurationSorted.

    We calculate the negative log of $(\\frac{h_i}{\\sum_{j \\in R_i} h_j})^d$,
    where h = exp(log_h) are the hazards and R is the risk set, and d is event.

    We just compute a cumulative sum, and not the true Risk sets. This is a
    limitation, but simple and fast.
    """

    def __init__(self):
        super().__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
nikolase90/pycox
CoxPHLossSorted
false
7,348
[ "BSD-2-Clause" ]
1
1c780253da7bab7eba0dc02e1436a68a9b812a66
https://github.com/nikolase90/pycox/tree/1c780253da7bab7eba0dc02e1436a68a9b812a66
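The gamma shift in cox_ph_loss_sorted is a running log-cumsum-exp stabilized by the maximum, the same trick the fused kernel reproduces with triton_helpers.max2. A small sketch of why it matters; the values are illustrative and chosen so the naive form overflows float32:

import torch

log_h = torch.tensor([100.0, 101.0, 102.0])
naive = log_h.exp().cumsum(0).log()  # exp(100) overflows float32 -> inf
gamma = log_h.max()
stable = log_h.sub(gamma).exp().cumsum(0).log().add(gamma)
print(naive)   # tensor([inf, inf, inf])
print(stable)  # finite values near 100.00, 101.31, 102.41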
leaky_hardtanh
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/oi/coiikexixk72fojnq6ypmcez3cii47yphqpwammllvhkrpkcygzp.py # Topologically Sorted Source Nodes: [lt, mul, add, x, gt, mul_1, add_1, x_1], Original ATen: [aten.lt, aten.mul, aten.add, aten.where, aten.gt] # Source node to ATen node mapping: # add => add # add_1 => add_1 # gt => gt # lt => lt # mul => mul # mul_1 => mul_1 # x => where # x_1 => where_1 # Graph fragment: # %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%arg0_1, -1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.01), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, -1), kwargs = {}) # %where : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%lt, %add, %arg0_1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where, 1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, 0.01), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_1, %where), kwargs = {}) triton_poi_fused_add_gt_lt_mul_where_0 = async_compile.triton('triton_poi_fused_add_gt_lt_mul_where_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_gt_lt_mul_where_0', 'mutated_arg_names': [], 
'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_gt_lt_mul_where_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = -1.0 tmp2 = tmp0 < tmp1 tmp3 = 0.01 tmp4 = tmp0 * tmp3 tmp5 = tmp4 + tmp1 tmp6 = tl.where(tmp2, tmp5, tmp0) tmp7 = 1.0 tmp8 = tmp6 > tmp7 tmp9 = tmp6 * tmp3 tmp10 = tmp9 + tmp7 tmp11 = tl.where(tmp8, tmp10, tmp6) tl.store(out_ptr0 + (x0), tmp11, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [lt, mul, add, x, gt, mul_1, add_1, x_1], Original ATen: [aten.lt, aten.mul, aten.add, aten.where, aten.gt] stream0 = get_raw_stream(0) triton_poi_fused_add_gt_lt_mul_where_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class leaky_hardtanh(nn.Module): def __init__(self, min=-1, max=1, slope=0.01): super(leaky_hardtanh, self).__init__() self.min = min self.max = max self.slope = slope def forward(self, x): x = torch.where(x < self.min, self.min + x * self.slope, x) x = torch.where(x > self.max, self.max + x * self.slope, x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_gt_lt_mul_where_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = -1.0 tmp2 = tmp0 < tmp1 tmp3 = 0.01 tmp4 = tmp0 * tmp3 tmp5 = tmp4 + tmp1 tmp6 = tl.where(tmp2, tmp5, tmp0) tmp7 = 1.0 tmp8 = tmp6 > tmp7 tmp9 = tmp6 * tmp3 tmp10 = tmp9 + tmp7 tmp11 = tl.where(tmp8, tmp10, tmp6) tl.store(out_ptr0 + x0, tmp11, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_gt_lt_mul_where_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class leaky_hardtanhNew(nn.Module): def __init__(self, min=-1, max=1, slope=0.01): super(leaky_hardtanhNew, self).__init__() self.min = min self.max = max self.slope = slope def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
nikolasmorshuis/gadolinium_prediction
leaky_hardtanh
false
7,349
[ "Apache-2.0" ]
1
7d6640df5b62ce578a947d3a9b9c701c3d1ccd79
https://github.com/nikolasmorshuis/gadolinium_prediction/tree/7d6640df5b62ce578a947d3a9b9c701c3d1ccd79
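Unlike nn.Hardtanh, whose gradient is exactly zero outside [min, max], the module above keeps a small residual slope there. A quick sketch of the boundary behavior with the default min=-1, max=1, slope=0.01:

import torch

x = torch.tensor([-3.0, -1.0, 0.0, 1.0, 3.0])
y = torch.where(x < -1, -1 + x * 0.01, x)  # below min: min + x * slope
y = torch.where(y > 1, 1 + y * 0.01, y)    # above max: max + x * slope
print(y)  # tensor([-1.0300, -1.0000,  0.0000,  1.0000,  1.0300])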
GlobalAttention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/um/cum65j23qchrjf5dndblqgbw6zomhgwfj2obfidtgy7b5j3zwklm.py # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_1 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = 
xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/wk/cwk2wao7opapqbjj7klnqrd6tgist3ts3nc5veryzhzstwpx7d4l.py # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attn_1 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/zd/czdeq2ohbgubcyeps2ukquvfhigxtyega57i24ketclusfgmyedi.py # Topologically Sorted Source Nodes: [weightedContext_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # weightedContext_1 => cat # Graph fragment: # %cat : 
[num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%squeeze_1, %primals_2], 1), kwargs = {}) triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = (xindex // 8) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/nd/cndmyjzjevhkuc7c6qt54venjnegfpn7mgzqrw5yus7yt5u3qqaj.py # Topologically Sorted Source Nodes: [weightedContext_3], Original ATen: [aten.tanh] # Source node to ATen node mapping: # weightedContext_3 => tanh # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mm_1,), kwargs = {}) triton_poi_fused_tanh_3 = async_compile.triton('triton_poi_fused_tanh_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 
'kernel_name': 'triton_poi_fused_tanh_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_3(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = libdevice.tanh(tmp0) tl.store(in_out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 8), (8, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(primals_2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm] extern_kernels.bmm(primals_1, reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0), out=buf1) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_poi_fused__softmax_0.run(buf1, buf2, 16, grid=grid(16), stream=stream0) buf3 = reinterpret_tensor(buf1, (4, 4), (4, 1), 0); del buf1 # reuse # Topologically Sorted Source Nodes: [attn_1], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 16, grid=grid(16), stream=stream0) buf4 = reinterpret_tensor(buf2, (4, 1, 4), (4, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [bmm_1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 4, 1), 0), primals_1, out=buf4) buf5 = empty_strided_cuda((4, 8), (8, 1), torch.float32) # Topologically Sorted Source Nodes: [weightedContext_1], Original ATen: [aten.cat] triton_poi_fused_cat_2.run(buf4, primals_2, buf5, 32, grid=grid(32), stream=stream0) buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [weightedContext_2], Original ATen: [aten.mm] extern_kernels.mm(buf5, reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf6) buf7 = buf6; del buf6 # reuse # Topologically Sorted Source Nodes: [weightedContext_3], Original ATen: [aten.tanh] triton_poi_fused_tanh_3.run(buf7, 16, grid=grid(16), stream=stream0) return (buf7, buf3, primals_2, buf3, buf5, buf7, primals_4, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 
= rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
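# A minimal eager-mode sketch (an illustrative addition, not generated
# output) of what the fused graph in call() computes for the dotprod
# attention branch: a projected query scores the context rows (mm + bmm),
# the scores are softmax-normalized, and the attention-weighted context is
# concatenated with the query and passed through a bias-free linear + tanh.
# Shapes follow the 4x4 toy sizes in the assert_size_stride guards above;
# `w_in` / `w_out` stand in for the linear_in / linear_out weights.
import torch


def luong_dotprod_reference(query, context, w_in, w_out):
    # query: (B, D), context: (B, L, D), w_in: (D, D), w_out: (D, 2D)
    target = (query @ w_in.t()).unsqueeze(2)                             # (B, D, 1)
    attn = torch.softmax(torch.bmm(context, target).squeeze(2), dim=-1)  # (B, L)
    weighted = torch.bmm(attn.unsqueeze(1), context).squeeze(1)          # (B, D)
    out = torch.tanh(torch.cat((weighted, query), dim=1) @ w_out.t())    # (B, D)
    return out, attn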
import torch
import torch.nn as nn
import torch.cuda


def aeq(*args):
    base = args[0]
    for a in args[1:]:
        assert a == base, str(args)


class Bottle(nn.Module):

    def forward(self, input):
        if len(input.size()) <= 2:
            return super(Bottle, self).forward(input)
        size = input.size()[:2]
        out = super(Bottle, self).forward(input.view(size[0] * size[1], -1))
        return out.contiguous().view(size[0], size[1], -1)


class BottleLinear(Bottle, nn.Linear):
    pass


class GlobalAttention(nn.Module):
    """
    Luong Attention.

    Global attention takes a matrix and a query vector. It
    then computes a parameterized convex combination of the matrix
    based on the input query.

        H_1 H_2 H_3 ... H_n
          q   q   q       q
          |   |   |       |
           \\  |   |      /
                 .....
              \\   |   /
                   a

    Constructs a unit mapping.
    $$(H_1 + H_n, q) => (a)$$
    Where H is of `batch x n x dim` and q is of `batch x dim`.

    Luong Attention (dotprod):
    $$\\tanh(W_2 [(softmax((W_1 q + b_1) H) H), q] + b_2)$$.

    Bahdanau Attention (mlp):
    $$c = \\sum_{j=1}^{SeqLength}a_jh_j$$.
    The Alignment-function $$a$$ computes an alignment as:
    $$a_j = softmax(v_a^T \\tanh(W_a q + U_a h_j))$$.
    """

    def __init__(self, dim, coverage=False, attn_type='dotprod'):
        super(GlobalAttention, self).__init__()
        self.dim = dim
        self.attn_type = attn_type
        assert self.attn_type in ['dotprod', 'mlp'
            ], 'Please select a valid attention type.'
        if self.attn_type == 'dotprod':
            self.linear_in = nn.Linear(dim, dim, bias=False)
            self.linear_out = nn.Linear(dim * 2, dim, bias=False)
        elif self.attn_type == 'mlp':
            self.linear_context = BottleLinear(dim, dim, bias=False)
            self.linear_query = nn.Linear(dim, dim, bias=False)
            self.v = BottleLinear(dim, 1, bias=False)
        self.sm = nn.Softmax()
        self.tanh = nn.Tanh()
        self.mask = None
        if coverage:
            self.linear_cover = nn.Linear(1, dim, bias=False)

    def applyMask(self, mask):
        self.mask = mask

    def forward(self, input, context, coverage=None):
        """
        input (FloatTensor): batch x dim
        context (FloatTensor): batch x sourceL x dim
        coverage (FloatTensor): batch x sourceL
        """
        batch, sourceL, dim = context.size()
        batch_, dim_ = input.size()
        aeq(batch, batch_)
        aeq(dim, dim_)
        aeq(self.dim, dim)
        if coverage is not None:
            batch_, sourceL_ = coverage.size()
            aeq(batch, batch_)
            aeq(sourceL, sourceL_)
        if self.mask is not None:
            beam_, batch_, sourceL_ = self.mask.size()
            aeq(batch, batch_ * beam_)
            aeq(sourceL, sourceL_)
        if coverage is not None:  # a bare `if coverage:` would raise on a multi-element tensor
            context += self.linear_cover(coverage.view(-1).unsqueeze(1)
                ).view_as(context)
            context = self.tanh(context)
        if self.attn_type == 'dotprod':
            targetT = self.linear_in(input).unsqueeze(2)
            attn = torch.bmm(context, targetT).squeeze(2)
        elif self.attn_type == 'mlp':
            wq = self.linear_query(input).unsqueeze(1)
            uh = self.linear_context(context.contiguous())
            wquh = uh + wq.expand_as(uh)
            wquh = self.tanh(wquh)
            attn = self.v(wquh.contiguous()).squeeze()
        if self.mask is not None:
            attn.data.masked_fill_(self.mask, -float('inf'))
        attn = self.sm(attn)
        attn3 = attn.view(attn.size(0), 1, attn.size(1))
        weightedContext = torch.bmm(attn3, context).squeeze(1)
        if self.attn_type == 'dotprod':
            weightedContext = torch.cat((weightedContext, input), 1)
            weightedContext = self.linear_out(weightedContext)
            weightedContext = self.tanh(weightedContext)
        batch_, sourceL_ = attn.size()
        aeq(batch, batch_)
        aeq(sourceL, sourceL_)
        batch_, dim_ = weightedContext.size()
        aeq(batch, batch_)
        aeq(dim, dim_)
        return weightedContext, attn


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'dim': 4}]
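# Usage sketch (an illustrative addition; CPU tensors are fine for the eager
# module): get_inputs() yields the (query, context) pair in the order
# GlobalAttention.forward expects, and get_init_inputs() carries the kwargs.
def _demo():
    query, context = get_inputs()
    layer = GlobalAttention(**get_init_inputs()[1])
    weighted, attn = layer(query, context)
    print(weighted.shape, attn.shape)  # torch.Size([4, 4]) torch.Size([4, 4])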
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.cuda assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8
    x1 = xindex // 8
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x2, tmp10, xmask)


@triton.jit
def triton_poi_fused_tanh_3(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = libdevice.tanh(tmp0)
    tl.store(in_out_ptr0 + x0, tmp1, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 8), (8, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(primals_2, reinterpret_tensor(primals_3, (4, 4),
            (1, 4), 0), out=buf0)
        del primals_3
        buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(primals_1, reinterpret_tensor(buf0, (4, 4, 1),
            (4, 1, 1), 0), out=buf1)
        buf2 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(16)](buf1, buf2, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf3 = reinterpret_tensor(buf1, (4, 4), (4, 1), 0)
        del buf1
        triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf4 = reinterpret_tensor(buf2, (4, 1, 4), (4, 4, 1), 0)
        del buf2
        extern_kernels.bmm(reinterpret_tensor(buf3, (4, 1, 4), (4, 4, 1),
            0), primals_1, out=buf4)
        buf5 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        triton_poi_fused_cat_2[grid(32)](buf4, primals_2, buf5, 32,
            XBLOCK=32, num_warps=1, num_stages=1)
        buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0)
        del buf4
        extern_kernels.mm(buf5, reinterpret_tensor(primals_4, (8, 4),
            (1, 8), 0), out=buf6)
        buf7 = buf6
        del buf6
        triton_poi_fused_tanh_3[grid(16)](buf7, 16, XBLOCK=16, num_warps=1,
            num_stages=1)
    return (buf7, buf3, primals_2, buf3, buf5, buf7, primals_4,
        reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0))


def aeq(*args):
    base = args[0]
    for a in args[1:]:
        assert a == base, str(args)


class Bottle(nn.Module):

    def forward(self, input):
        if len(input.size()) <= 2:
            return super(Bottle, self).forward(input)
        size = input.size()[:2]
        out = super(Bottle, self).forward(input.view(size[0] * size[1], -1))
        return out.contiguous().view(size[0], size[1], -1)


class BottleLinear(Bottle, nn.Linear):
    pass


class GlobalAttentionNew(nn.Module):
    """
    Luong Attention.

    Global attention takes a matrix and a query vector. It
    then computes a parameterized convex combination of the matrix
    based on the input query.

        H_1 H_2 H_3 ... H_n
          q   q   q       q
          |   |   |       |
           \\  |   |      /
                 .....
              \\   |   /
                   a

    Constructs a unit mapping.
    $$(H_1 + H_n, q) => (a)$$
    Where H is of `batch x n x dim` and q is of `batch x dim`.

    Luong Attention (dotprod):
    $$\\tanh(W_2 [(softmax((W_1 q + b_1) H) H), q] + b_2)$$.

    Bahdanau Attention (mlp):
    $$c = \\sum_{j=1}^{SeqLength}a_jh_j$$.
    The Alignment-function $$a$$ computes an alignment as:
    $$a_j = softmax(v_a^T \\tanh(W_a q + U_a h_j))$$.
""" def __init__(self, dim, coverage=False, attn_type='dotprod'): super(GlobalAttentionNew, self).__init__() self.dim = dim self.attn_type = attn_type assert self.attn_type in ['dotprod', 'mlp' ], 'Please select a valid attention type.' if self.attn_type == 'dotprod': self.linear_in = nn.Linear(dim, dim, bias=False) self.linear_out = nn.Linear(dim * 2, dim, bias=False) elif self.attn_type == 'mlp': self.linear_context = BottleLinear(dim, dim, bias=False) self.linear_query = nn.Linear(dim, dim, bias=False) self.v = BottleLinear(dim, 1, bias=False) self.sm = nn.Softmax() self.tanh = nn.Tanh() self.mask = None if coverage: self.linear_cover = nn.Linear(1, dim, bias=False) def applyMask(self, mask): self.mask = mask def forward(self, input_0, input_1): primals_2 = self.linear_in.weight primals_4 = self.linear_out.weight primals_3 = input_0 primals_1 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0], output[1]
nikhilweee/syntactic-seq2seq
GlobalAttention
false
7350
[ "MIT" ]
1
807e524167b064fc85c91e5e2fa994de6b739455
https://github.com/nikhilweee/syntactic-seq2seq/tree/807e524167b064fc85c91e5e2fa994de6b739455
NetVLAD
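# Orientation sketch for the NetVLAD listing below (an editorial addition,
# not generated output): the first reduction kernel accumulates sum(x**2)
# over the 128-channel dimension, and the fused div/sub kernel then divides
# by max(sqrt(.), 1e-12) before subtracting each cluster centroid -- i.e.
# channel-wise L2 normalization of an NCHW feature map. A minimal eager
# equivalent of that normalization step:
import torch
import torch.nn.functional as F


def l2_normalize_channels(x):
    # x: (N, C, H, W) -> x / clamp_min(||x||_2 over C, 1e-12)
    return F.normalize(x, p=2, dim=1, eps=1e-12)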
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/tb/ctbeeotfqzbneeewwh2aiay5657nsb5gfe5znphkkjrpdvh7ojsn.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.linalg_vector_norm] # Source node to ATen node mapping: # x => pow_1, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) triton_red_fused_linalg_vector_norm_0 = async_compile.triton('triton_red_fused_linalg_vector_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[16384, 128], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 16384 rnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, 
None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 4096 x1 = (xindex // 4096) _tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x3 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (x0 + (4096*r2) + (524288*x1)), rmask, eviction_policy='evict_last', other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = _tmp3 + tmp2 _tmp3 = tl.where(rmask, tmp4, _tmp3) tmp3 = tl.sum(_tmp3, 1)[:, None] tl.store(out_ptr0 + (x3), tmp3, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ef/cefdzljppvz2lfunb6uf63d2oi3ptkpnhsxqbeffjopee5fas75z.py # Topologically Sorted Source Nodes: [x, residual_2, residual_4, residual_6, residual_8, residual_10, residual_12, residual_14, residual_16, residual_18, residual_20, residual_22, residual_24, residual_26, residual_28, residual_30, residual_32, residual_34, residual_36, residual_38, residual_40, residual_42, residual_44, residual_46, residual_48, residual_50, residual_52, residual_54, residual_56, residual_58, residual_60, residual_62, residual_64, residual_66, residual_68, residual_70, residual_72, residual_74, residual_76, residual_78, residual_80, residual_82, residual_84, residual_86, residual_88, residual_90, residual_92, residual_94, residual_96, residual_98, residual_100, residual_102, residual_104, residual_106, residual_108, residual_110, residual_112, residual_114, residual_116, residual_118, residual_120, residual_122, residual_124, residual_126], Original ATen: [aten.div, aten.sub] # Source node to ATen node mapping: # residual_10 => sub_6 # residual_100 => sub_51 # residual_102 => sub_52 # residual_104 => sub_53 # residual_106 => sub_54 # residual_108 => sub_55 # residual_110 => sub_56 # residual_112 => sub_57 # residual_114 => sub_58 # residual_116 => sub_59 # residual_118 => sub_60 # residual_12 => sub_7 # residual_120 => sub_61 # residual_122 => sub_62 # residual_124 => sub_63 # residual_126 => sub_64 # residual_14 => sub_8 # residual_16 => sub_9 # residual_18 => sub_10 # residual_2 => sub_2 # residual_20 => sub_11 # residual_22 => sub_12 # residual_24 => sub_13 # residual_26 => sub_14 # residual_28 => sub_15 # residual_30 => sub_16 # residual_32 => sub_17 # residual_34 => sub_18 # residual_36 => sub_19 # residual_38 => sub_20 # residual_4 => sub_3 # residual_40 => sub_21 # residual_42 => sub_22 # residual_44 => sub_23 # residual_46 => sub_24 # residual_48 => sub_25 # residual_50 => sub_26 # residual_52 => sub_27 # residual_54 => sub_28 # residual_56 => sub_29 # residual_58 => sub_30 # residual_6 => sub_4 # residual_60 => sub_31 # residual_62 => sub_32 # residual_64 => sub_33 # residual_66 => sub_34 # residual_68 => sub_35 # residual_70 => sub_36 # residual_72 => sub_37 # residual_74 => sub_38 # residual_76 => sub_39 # residual_78 => sub_40 # residual_8 => sub_5 # residual_80 => sub_41 # residual_82 => sub_42 # residual_84 => sub_43 # residual_86 => sub_44 # residual_88 => sub_45 # residual_90 => sub_46 # residual_92 => sub_47 # residual_94 => sub_48 # residual_96 => sub_49 # residual_98 => sub_50 # x => div # Graph fragment: # %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {}) # %sub_2 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_4), kwargs = {}) # %sub_3 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, 
%unsqueeze_7), kwargs = {}) # %sub_4 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_10), kwargs = {}) # %sub_5 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_13), kwargs = {}) # %sub_6 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_16), kwargs = {}) # %sub_7 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_19), kwargs = {}) # %sub_8 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_22), kwargs = {}) # %sub_9 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_25), kwargs = {}) # %sub_10 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_28), kwargs = {}) # %sub_11 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_31), kwargs = {}) # %sub_12 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_34), kwargs = {}) # %sub_13 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_37), kwargs = {}) # %sub_14 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_40), kwargs = {}) # %sub_15 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_43), kwargs = {}) # %sub_16 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_46), kwargs = {}) # %sub_17 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_49), kwargs = {}) # %sub_18 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_52), kwargs = {}) # %sub_19 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_55), kwargs = {}) # %sub_20 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_58), kwargs = {}) # %sub_21 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_61), kwargs = {}) # %sub_22 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_64), kwargs = {}) # %sub_23 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_67), kwargs = {}) # %sub_24 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_70), kwargs = {}) # %sub_25 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_73), kwargs = {}) # %sub_26 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_76), kwargs = {}) # %sub_27 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_79), kwargs = {}) # %sub_28 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_82), kwargs = {}) # %sub_29 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_85), kwargs = {}) # %sub_30 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_88), kwargs = {}) # %sub_31 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_91), kwargs = {}) # %sub_32 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = 
(%permute, %unsqueeze_94), kwargs = {}) # %sub_33 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_97), kwargs = {}) # %sub_34 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_100), kwargs = {}) # %sub_35 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_103), kwargs = {}) # %sub_36 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_106), kwargs = {}) # %sub_37 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_109), kwargs = {}) # %sub_38 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_112), kwargs = {}) # %sub_39 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_115), kwargs = {}) # %sub_40 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_118), kwargs = {}) # %sub_41 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_121), kwargs = {}) # %sub_42 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_124), kwargs = {}) # %sub_43 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_127), kwargs = {}) # %sub_44 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_130), kwargs = {}) # %sub_45 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_133), kwargs = {}) # %sub_46 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_136), kwargs = {}) # %sub_47 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_139), kwargs = {}) # %sub_48 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_142), kwargs = {}) # %sub_49 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_145), kwargs = {}) # %sub_50 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_148), kwargs = {}) # %sub_51 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_151), kwargs = {}) # %sub_52 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_154), kwargs = {}) # %sub_53 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_157), kwargs = {}) # %sub_54 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_160), kwargs = {}) # %sub_55 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_163), kwargs = {}) # %sub_56 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_166), kwargs = {}) # %sub_57 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_169), kwargs = {}) # %sub_58 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_172), kwargs = {}) # %sub_59 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_175), kwargs = {}) # %sub_60 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_178), kwargs = {}) # %sub_61 : [num_users=2] = 
call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_181), kwargs = {}) # %sub_62 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_184), kwargs = {}) # %sub_63 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_187), kwargs = {}) # %sub_64 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_190), kwargs = {}) triton_poi_fused_div_sub_1 = async_compile.triton('triton_poi_fused_div_sub_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2097152], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: '*fp32', 22: '*fp32', 23: '*fp32', 24: '*fp32', 25: '*fp32', 26: '*fp32', 27: '*fp32', 28: '*fp32', 29: '*fp32', 30: '*fp32', 31: '*fp32', 32: '*fp32', 33: '*fp32', 34: '*fp32', 35: '*fp32', 36: '*fp32', 37: '*fp32', 38: '*fp32', 39: '*fp32', 40: '*fp32', 41: '*fp32', 42: '*fp32', 43: '*fp32', 44: '*fp32', 45: '*fp32', 46: '*fp32', 47: '*fp32', 48: '*fp32', 49: '*fp32', 50: '*fp32', 51: '*fp32', 52: '*fp32', 53: '*fp32', 54: '*fp32', 55: '*fp32', 56: '*fp32', 57: '*fp32', 58: '*fp32', 59: '*fp32', 60: '*fp32', 61: '*fp32', 62: '*fp32', 63: '*fp32', 64: '*fp32', 65: '*fp32', 66: '*fp32', 67: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 65, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, out_ptr28, out_ptr29, out_ptr30, out_ptr31, out_ptr32, out_ptr33, out_ptr34, out_ptr35, out_ptr36, out_ptr37, out_ptr38, out_ptr39, out_ptr40, out_ptr41, 
out_ptr42, out_ptr43, out_ptr44, out_ptr45, out_ptr46, out_ptr47, out_ptr48, out_ptr49, out_ptr50, out_ptr51, out_ptr52, out_ptr53, out_ptr54, out_ptr55, out_ptr56, out_ptr57, out_ptr58, out_ptr59, out_ptr60, out_ptr61, out_ptr62, out_ptr63, xnumel, XBLOCK : tl.constexpr): xnumel = 2097152 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 4096 x2 = (xindex // 524288) x1 = (xindex // 4096) % 128 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x0 + (4096*x2)), None, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (128 + x1), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (256 + x1), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (384 + x1), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + (512 + x1), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr2 + (640 + x1), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + (768 + x1), None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr2 + (896 + x1), None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + (1024 + x1), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + (1152 + x1), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (1280 + x1), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (1408 + x1), None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr2 + (1536 + x1), None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (1664 + x1), None, eviction_policy='evict_last') tmp32 = tl.load(in_ptr2 + (1792 + x1), None, eviction_policy='evict_last') tmp34 = tl.load(in_ptr2 + (1920 + x1), None, eviction_policy='evict_last') tmp36 = tl.load(in_ptr2 + (2048 + x1), None, eviction_policy='evict_last') tmp38 = tl.load(in_ptr2 + (2176 + x1), None, eviction_policy='evict_last') tmp40 = tl.load(in_ptr2 + (2304 + x1), None, eviction_policy='evict_last') tmp42 = tl.load(in_ptr2 + (2432 + x1), None, eviction_policy='evict_last') tmp44 = tl.load(in_ptr2 + (2560 + x1), None, eviction_policy='evict_last') tmp46 = tl.load(in_ptr2 + (2688 + x1), None, eviction_policy='evict_last') tmp48 = tl.load(in_ptr2 + (2816 + x1), None, eviction_policy='evict_last') tmp50 = tl.load(in_ptr2 + (2944 + x1), None, eviction_policy='evict_last') tmp52 = tl.load(in_ptr2 + (3072 + x1), None, eviction_policy='evict_last') tmp54 = tl.load(in_ptr2 + (3200 + x1), None, eviction_policy='evict_last') tmp56 = tl.load(in_ptr2 + (3328 + x1), None, eviction_policy='evict_last') tmp58 = tl.load(in_ptr2 + (3456 + x1), None, eviction_policy='evict_last') tmp60 = tl.load(in_ptr2 + (3584 + x1), None, eviction_policy='evict_last') tmp62 = tl.load(in_ptr2 + (3712 + x1), None, eviction_policy='evict_last') tmp64 = tl.load(in_ptr2 + (3840 + x1), None, eviction_policy='evict_last') tmp66 = tl.load(in_ptr2 + (3968 + x1), None, eviction_policy='evict_last') tmp68 = tl.load(in_ptr2 + (4096 + x1), None, eviction_policy='evict_last') tmp70 = tl.load(in_ptr2 + (4224 + x1), None, eviction_policy='evict_last') tmp72 = tl.load(in_ptr2 + (4352 + x1), None, eviction_policy='evict_last') tmp74 = tl.load(in_ptr2 + (4480 + x1), None, eviction_policy='evict_last') tmp76 = tl.load(in_ptr2 + (4608 + x1), None, eviction_policy='evict_last') tmp78 = tl.load(in_ptr2 + (4736 + x1), None, eviction_policy='evict_last') tmp80 = tl.load(in_ptr2 + (4864 + x1), None, eviction_policy='evict_last') tmp82 = tl.load(in_ptr2 + (4992 + x1), None, eviction_policy='evict_last') tmp84 = tl.load(in_ptr2 + 
(5120 + x1), None, eviction_policy='evict_last') tmp86 = tl.load(in_ptr2 + (5248 + x1), None, eviction_policy='evict_last') tmp88 = tl.load(in_ptr2 + (5376 + x1), None, eviction_policy='evict_last') tmp90 = tl.load(in_ptr2 + (5504 + x1), None, eviction_policy='evict_last') tmp92 = tl.load(in_ptr2 + (5632 + x1), None, eviction_policy='evict_last') tmp94 = tl.load(in_ptr2 + (5760 + x1), None, eviction_policy='evict_last') tmp96 = tl.load(in_ptr2 + (5888 + x1), None, eviction_policy='evict_last') tmp98 = tl.load(in_ptr2 + (6016 + x1), None, eviction_policy='evict_last') tmp100 = tl.load(in_ptr2 + (6144 + x1), None, eviction_policy='evict_last') tmp102 = tl.load(in_ptr2 + (6272 + x1), None, eviction_policy='evict_last') tmp104 = tl.load(in_ptr2 + (6400 + x1), None, eviction_policy='evict_last') tmp106 = tl.load(in_ptr2 + (6528 + x1), None, eviction_policy='evict_last') tmp108 = tl.load(in_ptr2 + (6656 + x1), None, eviction_policy='evict_last') tmp110 = tl.load(in_ptr2 + (6784 + x1), None, eviction_policy='evict_last') tmp112 = tl.load(in_ptr2 + (6912 + x1), None, eviction_policy='evict_last') tmp114 = tl.load(in_ptr2 + (7040 + x1), None, eviction_policy='evict_last') tmp116 = tl.load(in_ptr2 + (7168 + x1), None, eviction_policy='evict_last') tmp118 = tl.load(in_ptr2 + (7296 + x1), None, eviction_policy='evict_last') tmp120 = tl.load(in_ptr2 + (7424 + x1), None, eviction_policy='evict_last') tmp122 = tl.load(in_ptr2 + (7552 + x1), None, eviction_policy='evict_last') tmp124 = tl.load(in_ptr2 + (7680 + x1), None, eviction_policy='evict_last') tmp126 = tl.load(in_ptr2 + (7808 + x1), None, eviction_policy='evict_last') tmp128 = tl.load(in_ptr2 + (7936 + x1), None, eviction_policy='evict_last') tmp130 = tl.load(in_ptr2 + (8064 + x1), None, eviction_policy='evict_last') tmp2 = libdevice.sqrt(tmp1) tmp3 = 1e-12 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = tmp0 / tmp4 tmp7 = tmp5 - tmp6 tmp9 = tmp5 - tmp8 tmp11 = tmp5 - tmp10 tmp13 = tmp5 - tmp12 tmp15 = tmp5 - tmp14 tmp17 = tmp5 - tmp16 tmp19 = tmp5 - tmp18 tmp21 = tmp5 - tmp20 tmp23 = tmp5 - tmp22 tmp25 = tmp5 - tmp24 tmp27 = tmp5 - tmp26 tmp29 = tmp5 - tmp28 tmp31 = tmp5 - tmp30 tmp33 = tmp5 - tmp32 tmp35 = tmp5 - tmp34 tmp37 = tmp5 - tmp36 tmp39 = tmp5 - tmp38 tmp41 = tmp5 - tmp40 tmp43 = tmp5 - tmp42 tmp45 = tmp5 - tmp44 tmp47 = tmp5 - tmp46 tmp49 = tmp5 - tmp48 tmp51 = tmp5 - tmp50 tmp53 = tmp5 - tmp52 tmp55 = tmp5 - tmp54 tmp57 = tmp5 - tmp56 tmp59 = tmp5 - tmp58 tmp61 = tmp5 - tmp60 tmp63 = tmp5 - tmp62 tmp65 = tmp5 - tmp64 tmp67 = tmp5 - tmp66 tmp69 = tmp5 - tmp68 tmp71 = tmp5 - tmp70 tmp73 = tmp5 - tmp72 tmp75 = tmp5 - tmp74 tmp77 = tmp5 - tmp76 tmp79 = tmp5 - tmp78 tmp81 = tmp5 - tmp80 tmp83 = tmp5 - tmp82 tmp85 = tmp5 - tmp84 tmp87 = tmp5 - tmp86 tmp89 = tmp5 - tmp88 tmp91 = tmp5 - tmp90 tmp93 = tmp5 - tmp92 tmp95 = tmp5 - tmp94 tmp97 = tmp5 - tmp96 tmp99 = tmp5 - tmp98 tmp101 = tmp5 - tmp100 tmp103 = tmp5 - tmp102 tmp105 = tmp5 - tmp104 tmp107 = tmp5 - tmp106 tmp109 = tmp5 - tmp108 tmp111 = tmp5 - tmp110 tmp113 = tmp5 - tmp112 tmp115 = tmp5 - tmp114 tmp117 = tmp5 - tmp116 tmp119 = tmp5 - tmp118 tmp121 = tmp5 - tmp120 tmp123 = tmp5 - tmp122 tmp125 = tmp5 - tmp124 tmp127 = tmp5 - tmp126 tmp129 = tmp5 - tmp128 tmp131 = tmp5 - tmp130 tl.store(out_ptr0 + (x3), tmp5, None) tl.store(out_ptr1 + (x3), tmp7, None) tl.store(out_ptr2 + (x3), tmp9, None) tl.store(out_ptr3 + (x3), tmp11, None) tl.store(out_ptr4 + (x3), tmp13, None) tl.store(out_ptr5 + (x3), tmp15, None) tl.store(out_ptr6 + (x3), tmp17, None) tl.store(out_ptr7 + (x3), tmp19, None) 
tl.store(out_ptr8 + (x3), tmp21, None) tl.store(out_ptr9 + (x3), tmp23, None) tl.store(out_ptr10 + (x3), tmp25, None) tl.store(out_ptr11 + (x3), tmp27, None) tl.store(out_ptr12 + (x3), tmp29, None) tl.store(out_ptr13 + (x3), tmp31, None) tl.store(out_ptr14 + (x3), tmp33, None) tl.store(out_ptr15 + (x3), tmp35, None) tl.store(out_ptr16 + (x3), tmp37, None) tl.store(out_ptr17 + (x3), tmp39, None) tl.store(out_ptr18 + (x3), tmp41, None) tl.store(out_ptr19 + (x3), tmp43, None) tl.store(out_ptr20 + (x3), tmp45, None) tl.store(out_ptr21 + (x3), tmp47, None) tl.store(out_ptr22 + (x3), tmp49, None) tl.store(out_ptr23 + (x3), tmp51, None) tl.store(out_ptr24 + (x3), tmp53, None) tl.store(out_ptr25 + (x3), tmp55, None) tl.store(out_ptr26 + (x3), tmp57, None) tl.store(out_ptr27 + (x3), tmp59, None) tl.store(out_ptr28 + (x3), tmp61, None) tl.store(out_ptr29 + (x3), tmp63, None) tl.store(out_ptr30 + (x3), tmp65, None) tl.store(out_ptr31 + (x3), tmp67, None) tl.store(out_ptr32 + (x3), tmp69, None) tl.store(out_ptr33 + (x3), tmp71, None) tl.store(out_ptr34 + (x3), tmp73, None) tl.store(out_ptr35 + (x3), tmp75, None) tl.store(out_ptr36 + (x3), tmp77, None) tl.store(out_ptr37 + (x3), tmp79, None) tl.store(out_ptr38 + (x3), tmp81, None) tl.store(out_ptr39 + (x3), tmp83, None) tl.store(out_ptr40 + (x3), tmp85, None) tl.store(out_ptr41 + (x3), tmp87, None) tl.store(out_ptr42 + (x3), tmp89, None) tl.store(out_ptr43 + (x3), tmp91, None) tl.store(out_ptr44 + (x3), tmp93, None) tl.store(out_ptr45 + (x3), tmp95, None) tl.store(out_ptr46 + (x3), tmp97, None) tl.store(out_ptr47 + (x3), tmp99, None) tl.store(out_ptr48 + (x3), tmp101, None) tl.store(out_ptr49 + (x3), tmp103, None) tl.store(out_ptr50 + (x3), tmp105, None) tl.store(out_ptr51 + (x3), tmp107, None) tl.store(out_ptr52 + (x3), tmp109, None) tl.store(out_ptr53 + (x3), tmp111, None) tl.store(out_ptr54 + (x3), tmp113, None) tl.store(out_ptr55 + (x3), tmp115, None) tl.store(out_ptr56 + (x3), tmp117, None) tl.store(out_ptr57 + (x3), tmp119, None) tl.store(out_ptr58 + (x3), tmp121, None) tl.store(out_ptr59 + (x3), tmp123, None) tl.store(out_ptr60 + (x3), tmp125, None) tl.store(out_ptr61 + (x3), tmp127, None) tl.store(out_ptr62 + (x3), tmp129, None) tl.store(out_ptr63 + (x3), tmp131, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/u6/cu6dgbkwo4zyodk2zqiay4hwrwemkqpxzmixog3qipqaqcevgo7u.py # Topologically Sorted Source Nodes: [soft_assign_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # soft_assign_1 => amax, exp, sub, sum_2 # Graph fragment: # %amax : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%view, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_2 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) triton_per_fused__softmax_2 = async_compile.triton('triton_per_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16384, 64], 
reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_2(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16384 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x0 = xindex % 4096 x1 = (xindex // 4096) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (4096*r2) + (262144*x1)), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = triton_helpers.max2(tmp1, 1)[:, None] tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.sum(tmp6, 1)[:, None] tl.store(out_ptr0 + (x3), tmp3, None) tl.store(out_ptr1 + (x3), tmp8, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/46/c465fdmmhrzvuvb7xjrad46zallycaofrdeajo4ox533uv52dzji.py # Topologically Sorted Source Nodes: [residual, residual_1, sum_1, residual_3, sum_2, residual_5, sum_3, residual_7, sum_4, residual_9, sum_5, residual_11, sum_6, residual_13, sum_7, residual_15, sum_8, residual_17, sum_9, residual_19, sum_10, residual_21, sum_11, residual_23, sum_12, residual_25, sum_13, residual_27, sum_14, residual_29, sum_15, residual_31, sum_16, residual_33, sum_17, residual_35, sum_18, residual_37, sum_19, residual_39, sum_20, residual_41, sum_21, residual_43, sum_22, residual_45, sum_23, residual_47, sum_24, residual_49, sum_25, residual_51, sum_26, residual_53, sum_27, residual_55, sum_28, residual_57, sum_29], Original ATen: [aten.sub, aten.mul, aten.sum] # Source node to ATen node mapping: # residual => sub_1 # residual_1 => mul # residual_11 => mul_5 # residual_13 => mul_6 # residual_15 => mul_7 # residual_17 => mul_8 # residual_19 => mul_9 # residual_21 => mul_10 # residual_23 => mul_11 # residual_25 => mul_12 # residual_27 => mul_13 # residual_29 => mul_14 # residual_3 => mul_1 # residual_31 => mul_15 # residual_33 => mul_16 # residual_35 => mul_17 # residual_37 => mul_18 # residual_39 => mul_19 # residual_41 => mul_20 # residual_43 => mul_21 # residual_45 => mul_22 # residual_47 => mul_23 # residual_49 => mul_24 # residual_5 => mul_2 # residual_51 => mul_25 # residual_53 => mul_26 # residual_55 => mul_27 # residual_57 => mul_28 # residual_7 => mul_3 # residual_9 => mul_4 # sum_1 => sum_3 # sum_10 => sum_12 # sum_11 => sum_13 # sum_12 => sum_14 # sum_13 => sum_15 # sum_14 => sum_16 # sum_15 => sum_17 # sum_16 => sum_18 # sum_17 => sum_19 
# sum_18 => sum_20 # sum_19 => sum_21 # sum_2 => sum_4 # sum_20 => sum_22 # sum_21 => sum_23 # sum_22 => sum_24 # sum_23 => sum_25 # sum_24 => sum_26 # sum_25 => sum_27 # sum_26 => sum_28 # sum_27 => sum_29 # sum_28 => sum_30 # sum_29 => sum_31 # sum_3 => sum_5 # sum_4 => sum_6 # sum_5 => sum_7 # sum_6 => sum_8 # sum_7 => sum_9 # sum_8 => sum_10 # sum_9 => sum_11 # Graph fragment: # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %unsqueeze_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %unsqueeze_2), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %unsqueeze_5), kwargs = {}) # %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [-1]), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %unsqueeze_8), kwargs = {}) # %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [-1]), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %unsqueeze_11), kwargs = {}) # %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_3, [-1]), kwargs = {}) # %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_5, %unsqueeze_14), kwargs = {}) # %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [-1]), kwargs = {}) # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %unsqueeze_17), kwargs = {}) # %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_5, [-1]), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_7, %unsqueeze_20), kwargs = {}) # %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_6, [-1]), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_8, %unsqueeze_23), kwargs = {}) # %sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_7, [-1]), kwargs = {}) # %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_9, %unsqueeze_26), kwargs = {}) # %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_8, [-1]), kwargs = {}) # %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, %unsqueeze_29), kwargs = {}) # %sum_12 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_9, [-1]), kwargs = {}) # %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, %unsqueeze_32), kwargs = {}) # %sum_13 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_10, [-1]), kwargs = {}) # %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_12, %unsqueeze_35), kwargs = {}) # %sum_14 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_11, [-1]), kwargs = {}) # %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_13, %unsqueeze_38), kwargs = {}) # %sum_15 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = 
(%mul_12, [-1]), kwargs = {}) # %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_14, %unsqueeze_41), kwargs = {}) # %sum_16 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_13, [-1]), kwargs = {}) # %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_15, %unsqueeze_44), kwargs = {}) # %sum_17 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_14, [-1]), kwargs = {}) # %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_16, %unsqueeze_47), kwargs = {}) # %sum_18 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_15, [-1]), kwargs = {}) # %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_17, %unsqueeze_50), kwargs = {}) # %sum_19 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_16, [-1]), kwargs = {}) # %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_18, %unsqueeze_53), kwargs = {}) # %sum_20 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_17, [-1]), kwargs = {}) # %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_19, %unsqueeze_56), kwargs = {}) # %sum_21 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_18, [-1]), kwargs = {}) # %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_20, %unsqueeze_59), kwargs = {}) # %sum_22 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_19, [-1]), kwargs = {}) # %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_21, %unsqueeze_62), kwargs = {}) # %sum_23 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_20, [-1]), kwargs = {}) # %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_22, %unsqueeze_65), kwargs = {}) # %sum_24 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_21, [-1]), kwargs = {}) # %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_23, %unsqueeze_68), kwargs = {}) # %sum_25 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_22, [-1]), kwargs = {}) # %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_24, %unsqueeze_71), kwargs = {}) # %sum_26 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_23, [-1]), kwargs = {}) # %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_25, %unsqueeze_74), kwargs = {}) # %sum_27 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_24, [-1]), kwargs = {}) # %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_26, %unsqueeze_77), kwargs = {}) # %sum_28 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_25, [-1]), kwargs = {}) # %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_27, %unsqueeze_80), kwargs = {}) # %sum_29 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_26, [-1]), kwargs = {}) # %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_28, %unsqueeze_83), kwargs = {}) # %sum_30 : [num_users=1] = 
call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_27, [-1]), kwargs = {}) # %mul_28 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_29, %unsqueeze_86), kwargs = {}) # %sum_31 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_28, [-1]), kwargs = {}) triton_red_fused_mul_sub_sum_3 = async_compile.triton('triton_red_fused_mul_sub_sum_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[512, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: '*fp32', 22: '*fp32', 23: '*fp32', 24: '*fp32', 25: '*fp32', 26: '*fp32', 27: '*fp32', 28: '*fp32', 29: '*fp32', 30: '*fp32', 31: '*fp32', 32: '*fp32', 33: '*fp32', 34: '*fp32', 35: '*fp32', 36: '*fp32', 37: '*fp32', 38: '*fp32', 39: '*fp32', 40: '*fp32', 41: '*fp32', 42: '*fp32', 43: '*fp32', 44: '*fp32', 45: '*fp32', 46: '*fp32', 47: '*fp32', 48: '*fp32', 49: '*fp32', 50: '*fp32', 51: '*fp32', 52: '*fp32', 53: '*fp32', 54: '*fp32', 55: '*fp32', 56: '*fp32', 57: '*fp32', 58: '*fp32', 59: '*fp32', 60: '*fp32', 61: '*fp32', 62: 'i32', 63: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_mul_sub_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 61, 'num_reduction': 29, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31, in_ptr32, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, out_ptr28, xnumel, rnumel, XBLOCK : 
tl.constexpr, RBLOCK : tl.constexpr): xnumel = 512 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x3 = xindex x0 = xindex % 128 tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') x1 = (xindex // 128) _tmp11 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp20 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp29 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp38 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp47 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp56 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp65 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp74 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp83 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp92 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp101 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp110 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp119 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp128 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp137 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp146 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp155 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp164 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp173 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp182 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp191 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp200 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp209 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp218 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp227 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp236 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp245 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp254 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp263 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp3 = tl.load(in_ptr2 + (r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp4 = tl.load(in_ptr3 + (r2 + (4096*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tl.load(in_ptr4 + (r2 + (4096*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp13 = tl.load(in_ptr5 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp14 = tl.load(in_ptr2 + (4096 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp22 = tl.load(in_ptr6 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp23 = tl.load(in_ptr2 + (8192 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp31 = tl.load(in_ptr7 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp32 = tl.load(in_ptr2 + (12288 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp40 = tl.load(in_ptr8 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp41 = tl.load(in_ptr2 + (16384 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp49 = tl.load(in_ptr9 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp50 = tl.load(in_ptr2 + (20480 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp58 = tl.load(in_ptr10 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp59 = tl.load(in_ptr2 + (24576 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', 
other=0.0) tmp67 = tl.load(in_ptr11 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp68 = tl.load(in_ptr2 + (28672 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp76 = tl.load(in_ptr12 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp77 = tl.load(in_ptr2 + (32768 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp85 = tl.load(in_ptr13 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp86 = tl.load(in_ptr2 + (36864 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp94 = tl.load(in_ptr14 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp95 = tl.load(in_ptr2 + (40960 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp103 = tl.load(in_ptr15 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp104 = tl.load(in_ptr2 + (45056 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp112 = tl.load(in_ptr16 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp113 = tl.load(in_ptr2 + (49152 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp121 = tl.load(in_ptr17 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp122 = tl.load(in_ptr2 + (53248 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp130 = tl.load(in_ptr18 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp131 = tl.load(in_ptr2 + (57344 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp139 = tl.load(in_ptr19 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp140 = tl.load(in_ptr2 + (61440 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp148 = tl.load(in_ptr20 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp149 = tl.load(in_ptr2 + (65536 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp157 = tl.load(in_ptr21 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp158 = tl.load(in_ptr2 + (69632 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp166 = tl.load(in_ptr22 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp167 = tl.load(in_ptr2 + (73728 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp175 = tl.load(in_ptr23 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp176 = tl.load(in_ptr2 + (77824 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp184 = tl.load(in_ptr24 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp185 = tl.load(in_ptr2 + (81920 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp193 = tl.load(in_ptr25 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp194 = tl.load(in_ptr2 + (86016 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp202 = tl.load(in_ptr26 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp203 = tl.load(in_ptr2 + (90112 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp211 = tl.load(in_ptr27 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp212 = tl.load(in_ptr2 
+ (94208 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp220 = tl.load(in_ptr28 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp221 = tl.load(in_ptr2 + (98304 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp229 = tl.load(in_ptr29 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp230 = tl.load(in_ptr2 + (102400 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp238 = tl.load(in_ptr30 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp239 = tl.load(in_ptr2 + (106496 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp247 = tl.load(in_ptr31 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp248 = tl.load(in_ptr2 + (110592 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp256 = tl.load(in_ptr32 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp257 = tl.load(in_ptr2 + (114688 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tmp0 - tmp1 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp2 * tmp8 tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = _tmp11 + tmp10 _tmp11 = tl.where(rmask & xmask, tmp12, _tmp11) tmp15 = tmp14 - tmp4 tmp16 = tl_math.exp(tmp15) tmp17 = tmp16 / tmp7 tmp18 = tmp13 * tmp17 tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp21 = _tmp20 + tmp19 _tmp20 = tl.where(rmask & xmask, tmp21, _tmp20) tmp24 = tmp23 - tmp4 tmp25 = tl_math.exp(tmp24) tmp26 = tmp25 / tmp7 tmp27 = tmp22 * tmp26 tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp30 = _tmp29 + tmp28 _tmp29 = tl.where(rmask & xmask, tmp30, _tmp29) tmp33 = tmp32 - tmp4 tmp34 = tl_math.exp(tmp33) tmp35 = tmp34 / tmp7 tmp36 = tmp31 * tmp35 tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK]) tmp39 = _tmp38 + tmp37 _tmp38 = tl.where(rmask & xmask, tmp39, _tmp38) tmp42 = tmp41 - tmp4 tmp43 = tl_math.exp(tmp42) tmp44 = tmp43 / tmp7 tmp45 = tmp40 * tmp44 tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK]) tmp48 = _tmp47 + tmp46 _tmp47 = tl.where(rmask & xmask, tmp48, _tmp47) tmp51 = tmp50 - tmp4 tmp52 = tl_math.exp(tmp51) tmp53 = tmp52 / tmp7 tmp54 = tmp49 * tmp53 tmp55 = tl.broadcast_to(tmp54, [XBLOCK, RBLOCK]) tmp57 = _tmp56 + tmp55 _tmp56 = tl.where(rmask & xmask, tmp57, _tmp56) tmp60 = tmp59 - tmp4 tmp61 = tl_math.exp(tmp60) tmp62 = tmp61 / tmp7 tmp63 = tmp58 * tmp62 tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK]) tmp66 = _tmp65 + tmp64 _tmp65 = tl.where(rmask & xmask, tmp66, _tmp65) tmp69 = tmp68 - tmp4 tmp70 = tl_math.exp(tmp69) tmp71 = tmp70 / tmp7 tmp72 = tmp67 * tmp71 tmp73 = tl.broadcast_to(tmp72, [XBLOCK, RBLOCK]) tmp75 = _tmp74 + tmp73 _tmp74 = tl.where(rmask & xmask, tmp75, _tmp74) tmp78 = tmp77 - tmp4 tmp79 = tl_math.exp(tmp78) tmp80 = tmp79 / tmp7 tmp81 = tmp76 * tmp80 tmp82 = tl.broadcast_to(tmp81, [XBLOCK, RBLOCK]) tmp84 = _tmp83 + tmp82 _tmp83 = tl.where(rmask & xmask, tmp84, _tmp83) tmp87 = tmp86 - tmp4 tmp88 = tl_math.exp(tmp87) tmp89 = tmp88 / tmp7 tmp90 = tmp85 * tmp89 tmp91 = tl.broadcast_to(tmp90, [XBLOCK, RBLOCK]) tmp93 = _tmp92 + tmp91 _tmp92 = tl.where(rmask & xmask, tmp93, _tmp92) tmp96 = tmp95 - tmp4 tmp97 = tl_math.exp(tmp96) tmp98 = tmp97 / tmp7 tmp99 = tmp94 * tmp98 tmp100 = tl.broadcast_to(tmp99, [XBLOCK, RBLOCK]) tmp102 = _tmp101 + tmp100 _tmp101 = tl.where(rmask & xmask, tmp102, _tmp101) tmp105 = tmp104 - tmp4 tmp106 = tl_math.exp(tmp105) 
tmp107 = tmp106 / tmp7 tmp108 = tmp103 * tmp107 tmp109 = tl.broadcast_to(tmp108, [XBLOCK, RBLOCK]) tmp111 = _tmp110 + tmp109 _tmp110 = tl.where(rmask & xmask, tmp111, _tmp110) tmp114 = tmp113 - tmp4 tmp115 = tl_math.exp(tmp114) tmp116 = tmp115 / tmp7 tmp117 = tmp112 * tmp116 tmp118 = tl.broadcast_to(tmp117, [XBLOCK, RBLOCK]) tmp120 = _tmp119 + tmp118 _tmp119 = tl.where(rmask & xmask, tmp120, _tmp119) tmp123 = tmp122 - tmp4 tmp124 = tl_math.exp(tmp123) tmp125 = tmp124 / tmp7 tmp126 = tmp121 * tmp125 tmp127 = tl.broadcast_to(tmp126, [XBLOCK, RBLOCK]) tmp129 = _tmp128 + tmp127 _tmp128 = tl.where(rmask & xmask, tmp129, _tmp128) tmp132 = tmp131 - tmp4 tmp133 = tl_math.exp(tmp132) tmp134 = tmp133 / tmp7 tmp135 = tmp130 * tmp134 tmp136 = tl.broadcast_to(tmp135, [XBLOCK, RBLOCK]) tmp138 = _tmp137 + tmp136 _tmp137 = tl.where(rmask & xmask, tmp138, _tmp137) tmp141 = tmp140 - tmp4 tmp142 = tl_math.exp(tmp141) tmp143 = tmp142 / tmp7 tmp144 = tmp139 * tmp143 tmp145 = tl.broadcast_to(tmp144, [XBLOCK, RBLOCK]) tmp147 = _tmp146 + tmp145 _tmp146 = tl.where(rmask & xmask, tmp147, _tmp146) tmp150 = tmp149 - tmp4 tmp151 = tl_math.exp(tmp150) tmp152 = tmp151 / tmp7 tmp153 = tmp148 * tmp152 tmp154 = tl.broadcast_to(tmp153, [XBLOCK, RBLOCK]) tmp156 = _tmp155 + tmp154 _tmp155 = tl.where(rmask & xmask, tmp156, _tmp155) tmp159 = tmp158 - tmp4 tmp160 = tl_math.exp(tmp159) tmp161 = tmp160 / tmp7 tmp162 = tmp157 * tmp161 tmp163 = tl.broadcast_to(tmp162, [XBLOCK, RBLOCK]) tmp165 = _tmp164 + tmp163 _tmp164 = tl.where(rmask & xmask, tmp165, _tmp164) tmp168 = tmp167 - tmp4 tmp169 = tl_math.exp(tmp168) tmp170 = tmp169 / tmp7 tmp171 = tmp166 * tmp170 tmp172 = tl.broadcast_to(tmp171, [XBLOCK, RBLOCK]) tmp174 = _tmp173 + tmp172 _tmp173 = tl.where(rmask & xmask, tmp174, _tmp173) tmp177 = tmp176 - tmp4 tmp178 = tl_math.exp(tmp177) tmp179 = tmp178 / tmp7 tmp180 = tmp175 * tmp179 tmp181 = tl.broadcast_to(tmp180, [XBLOCK, RBLOCK]) tmp183 = _tmp182 + tmp181 _tmp182 = tl.where(rmask & xmask, tmp183, _tmp182) tmp186 = tmp185 - tmp4 tmp187 = tl_math.exp(tmp186) tmp188 = tmp187 / tmp7 tmp189 = tmp184 * tmp188 tmp190 = tl.broadcast_to(tmp189, [XBLOCK, RBLOCK]) tmp192 = _tmp191 + tmp190 _tmp191 = tl.where(rmask & xmask, tmp192, _tmp191) tmp195 = tmp194 - tmp4 tmp196 = tl_math.exp(tmp195) tmp197 = tmp196 / tmp7 tmp198 = tmp193 * tmp197 tmp199 = tl.broadcast_to(tmp198, [XBLOCK, RBLOCK]) tmp201 = _tmp200 + tmp199 _tmp200 = tl.where(rmask & xmask, tmp201, _tmp200) tmp204 = tmp203 - tmp4 tmp205 = tl_math.exp(tmp204) tmp206 = tmp205 / tmp7 tmp207 = tmp202 * tmp206 tmp208 = tl.broadcast_to(tmp207, [XBLOCK, RBLOCK]) tmp210 = _tmp209 + tmp208 _tmp209 = tl.where(rmask & xmask, tmp210, _tmp209) tmp213 = tmp212 - tmp4 tmp214 = tl_math.exp(tmp213) tmp215 = tmp214 / tmp7 tmp216 = tmp211 * tmp215 tmp217 = tl.broadcast_to(tmp216, [XBLOCK, RBLOCK]) tmp219 = _tmp218 + tmp217 _tmp218 = tl.where(rmask & xmask, tmp219, _tmp218) tmp222 = tmp221 - tmp4 tmp223 = tl_math.exp(tmp222) tmp224 = tmp223 / tmp7 tmp225 = tmp220 * tmp224 tmp226 = tl.broadcast_to(tmp225, [XBLOCK, RBLOCK]) tmp228 = _tmp227 + tmp226 _tmp227 = tl.where(rmask & xmask, tmp228, _tmp227) tmp231 = tmp230 - tmp4 tmp232 = tl_math.exp(tmp231) tmp233 = tmp232 / tmp7 tmp234 = tmp229 * tmp233 tmp235 = tl.broadcast_to(tmp234, [XBLOCK, RBLOCK]) tmp237 = _tmp236 + tmp235 _tmp236 = tl.where(rmask & xmask, tmp237, _tmp236) tmp240 = tmp239 - tmp4 tmp241 = tl_math.exp(tmp240) tmp242 = tmp241 / tmp7 tmp243 = tmp238 * tmp242 tmp244 = tl.broadcast_to(tmp243, [XBLOCK, RBLOCK]) tmp246 = _tmp245 + tmp244 _tmp245 = 
tl.where(rmask & xmask, tmp246, _tmp245) tmp249 = tmp248 - tmp4 tmp250 = tl_math.exp(tmp249) tmp251 = tmp250 / tmp7 tmp252 = tmp247 * tmp251 tmp253 = tl.broadcast_to(tmp252, [XBLOCK, RBLOCK]) tmp255 = _tmp254 + tmp253 _tmp254 = tl.where(rmask & xmask, tmp255, _tmp254) tmp258 = tmp257 - tmp4 tmp259 = tl_math.exp(tmp258) tmp260 = tmp259 / tmp7 tmp261 = tmp256 * tmp260 tmp262 = tl.broadcast_to(tmp261, [XBLOCK, RBLOCK]) tmp264 = _tmp263 + tmp262 _tmp263 = tl.where(rmask & xmask, tmp264, _tmp263) tmp11 = tl.sum(_tmp11, 1)[:, None] tl.store(out_ptr0 + (x3), tmp11, xmask) tmp20 = tl.sum(_tmp20, 1)[:, None] tl.store(out_ptr1 + (x3), tmp20, xmask) tmp29 = tl.sum(_tmp29, 1)[:, None] tl.store(out_ptr2 + (x3), tmp29, xmask) tmp38 = tl.sum(_tmp38, 1)[:, None] tl.store(out_ptr3 + (x3), tmp38, xmask) tmp47 = tl.sum(_tmp47, 1)[:, None] tl.store(out_ptr4 + (x3), tmp47, xmask) tmp56 = tl.sum(_tmp56, 1)[:, None] tl.store(out_ptr5 + (x3), tmp56, xmask) tmp65 = tl.sum(_tmp65, 1)[:, None] tl.store(out_ptr6 + (x3), tmp65, xmask) tmp74 = tl.sum(_tmp74, 1)[:, None] tl.store(out_ptr7 + (x3), tmp74, xmask) tmp83 = tl.sum(_tmp83, 1)[:, None] tl.store(out_ptr8 + (x3), tmp83, xmask) tmp92 = tl.sum(_tmp92, 1)[:, None] tl.store(out_ptr9 + (x3), tmp92, xmask) tmp101 = tl.sum(_tmp101, 1)[:, None] tl.store(out_ptr10 + (x3), tmp101, xmask) tmp110 = tl.sum(_tmp110, 1)[:, None] tl.store(out_ptr11 + (x3), tmp110, xmask) tmp119 = tl.sum(_tmp119, 1)[:, None] tl.store(out_ptr12 + (x3), tmp119, xmask) tmp128 = tl.sum(_tmp128, 1)[:, None] tl.store(out_ptr13 + (x3), tmp128, xmask) tmp137 = tl.sum(_tmp137, 1)[:, None] tl.store(out_ptr14 + (x3), tmp137, xmask) tmp146 = tl.sum(_tmp146, 1)[:, None] tl.store(out_ptr15 + (x3), tmp146, xmask) tmp155 = tl.sum(_tmp155, 1)[:, None] tl.store(out_ptr16 + (x3), tmp155, xmask) tmp164 = tl.sum(_tmp164, 1)[:, None] tl.store(out_ptr17 + (x3), tmp164, xmask) tmp173 = tl.sum(_tmp173, 1)[:, None] tl.store(out_ptr18 + (x3), tmp173, xmask) tmp182 = tl.sum(_tmp182, 1)[:, None] tl.store(out_ptr19 + (x3), tmp182, xmask) tmp191 = tl.sum(_tmp191, 1)[:, None] tl.store(out_ptr20 + (x3), tmp191, xmask) tmp200 = tl.sum(_tmp200, 1)[:, None] tl.store(out_ptr21 + (x3), tmp200, xmask) tmp209 = tl.sum(_tmp209, 1)[:, None] tl.store(out_ptr22 + (x3), tmp209, xmask) tmp218 = tl.sum(_tmp218, 1)[:, None] tl.store(out_ptr23 + (x3), tmp218, xmask) tmp227 = tl.sum(_tmp227, 1)[:, None] tl.store(out_ptr24 + (x3), tmp227, xmask) tmp236 = tl.sum(_tmp236, 1)[:, None] tl.store(out_ptr25 + (x3), tmp236, xmask) tmp245 = tl.sum(_tmp245, 1)[:, None] tl.store(out_ptr26 + (x3), tmp245, xmask) tmp254 = tl.sum(_tmp254, 1)[:, None] tl.store(out_ptr27 + (x3), tmp254, xmask) tmp263 = tl.sum(_tmp263, 1)[:, None] tl.store(out_ptr28 + (x3), tmp263, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/7g/c7gpcb637ns46u6bq6sgchjaxn4thmkzjpeoxhjhn2ws6dc2fyq4.py # Topologically Sorted Source Nodes: [residual_59, sum_30, residual_61, sum_31, residual_63, sum_32, residual_65, sum_33, residual_67, sum_34, residual_69, sum_35, residual_71, sum_36, residual_73, sum_37, residual_75, sum_38, residual_77, sum_39, residual_79, sum_40, residual_81, sum_41, residual_83, sum_42, residual_85, sum_43, residual_87, sum_44, residual_89, sum_45, residual_91, sum_46, residual_93, sum_47, residual_95, sum_48, residual_97, sum_49, residual_99, sum_50, residual_101, sum_51, residual_103, sum_52, residual_105, sum_53, residual_107, sum_54, residual_109, sum_55, residual_111, sum_56, residual_113, sum_57], Original ATen: [aten.mul, aten.sum] # 
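# NOTE (editor): the fused mul/sum reductions in this file implement a NetVLAD-style
# aggregation: each output element is sum_n (x_n - c_k) * a_k(n), where a_k is the
# softmax soft-assignment of spatial position n to cluster k. The constants are
# consistent with B=4 images, K=64 clusters, C=128 channels and N=4096 positions
# (262144 = 64 * 4096, and the vlad buffer built further below is [4, 64, 128]).
# A minimal eager-mode sketch of the same math; `features`, `logits` and
# `centroids` are hypothetical names, not tensors that appear in this file:
def _netvlad_residual_sum_sketch(features, logits, centroids):
    # features: (B, C, N) descriptors, logits: (B, K, N) cluster scores,
    # centroids: (K, C) cluster centres.
    a = torch.softmax(logits, dim=1)                                # (B, K, N)
    residual = features.unsqueeze(1) - centroids[None, :, :, None]  # (B, K, C, N)
    return (residual * a.unsqueeze(2)).sum(dim=-1)                  # (B, K, C)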
Source node to ATen node mapping: # residual_101 => mul_50 # residual_103 => mul_51 # residual_105 => mul_52 # residual_107 => mul_53 # residual_109 => mul_54 # residual_111 => mul_55 # residual_113 => mul_56 # residual_59 => mul_29 # residual_61 => mul_30 # residual_63 => mul_31 # residual_65 => mul_32 # residual_67 => mul_33 # residual_69 => mul_34 # residual_71 => mul_35 # residual_73 => mul_36 # residual_75 => mul_37 # residual_77 => mul_38 # residual_79 => mul_39 # residual_81 => mul_40 # residual_83 => mul_41 # residual_85 => mul_42 # residual_87 => mul_43 # residual_89 => mul_44 # residual_91 => mul_45 # residual_93 => mul_46 # residual_95 => mul_47 # residual_97 => mul_48 # residual_99 => mul_49 # sum_30 => sum_32 # sum_31 => sum_33 # sum_32 => sum_34 # sum_33 => sum_35 # sum_34 => sum_36 # sum_35 => sum_37 # sum_36 => sum_38 # sum_37 => sum_39 # sum_38 => sum_40 # sum_39 => sum_41 # sum_40 => sum_42 # sum_41 => sum_43 # sum_42 => sum_44 # sum_43 => sum_45 # sum_44 => sum_46 # sum_45 => sum_47 # sum_46 => sum_48 # sum_47 => sum_49 # sum_48 => sum_50 # sum_49 => sum_51 # sum_50 => sum_52 # sum_51 => sum_53 # sum_52 => sum_54 # sum_53 => sum_55 # sum_54 => sum_56 # sum_55 => sum_57 # sum_56 => sum_58 # sum_57 => sum_59 # Graph fragment: # %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_30, %unsqueeze_89), kwargs = {}) # %sum_32 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_29, [-1]), kwargs = {}) # %mul_30 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_31, %unsqueeze_92), kwargs = {}) # %sum_33 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_30, [-1]), kwargs = {}) # %mul_31 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_32, %unsqueeze_95), kwargs = {}) # %sum_34 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_31, [-1]), kwargs = {}) # %mul_32 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_33, %unsqueeze_98), kwargs = {}) # %sum_35 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_32, [-1]), kwargs = {}) # %mul_33 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_34, %unsqueeze_101), kwargs = {}) # %sum_36 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_33, [-1]), kwargs = {}) # %mul_34 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_35, %unsqueeze_104), kwargs = {}) # %sum_37 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_34, [-1]), kwargs = {}) # %mul_35 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_36, %unsqueeze_107), kwargs = {}) # %sum_38 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_35, [-1]), kwargs = {}) # %mul_36 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_37, %unsqueeze_110), kwargs = {}) # %sum_39 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_36, [-1]), kwargs = {}) # %mul_37 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_38, %unsqueeze_113), kwargs = {}) # %sum_40 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_37, [-1]), kwargs = {}) # %mul_38 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_39, %unsqueeze_116), kwargs = {}) # 
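# NOTE (editor): in the fragment list that follows, each %mul_k multiplies a
# residual (%sub_k, apparently a feature-minus-broadcast-centroid difference) by an
# unsqueezed soft-assignment weight (%unsqueeze_k), and the paired %sum_k reduces
# over the last (spatial) dimension -- one (residual * weight).sum(-1) per cluster
# handled by this kernel.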
%sum_41 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_38, [-1]), kwargs = {}) # %mul_39 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_40, %unsqueeze_119), kwargs = {}) # %sum_42 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_39, [-1]), kwargs = {}) # %mul_40 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_41, %unsqueeze_122), kwargs = {}) # %sum_43 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_40, [-1]), kwargs = {}) # %mul_41 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_42, %unsqueeze_125), kwargs = {}) # %sum_44 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_41, [-1]), kwargs = {}) # %mul_42 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_43, %unsqueeze_128), kwargs = {}) # %sum_45 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_42, [-1]), kwargs = {}) # %mul_43 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_44, %unsqueeze_131), kwargs = {}) # %sum_46 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_43, [-1]), kwargs = {}) # %mul_44 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_45, %unsqueeze_134), kwargs = {}) # %sum_47 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_44, [-1]), kwargs = {}) # %mul_45 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_46, %unsqueeze_137), kwargs = {}) # %sum_48 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_45, [-1]), kwargs = {}) # %mul_46 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_47, %unsqueeze_140), kwargs = {}) # %sum_49 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_46, [-1]), kwargs = {}) # %mul_47 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_48, %unsqueeze_143), kwargs = {}) # %sum_50 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_47, [-1]), kwargs = {}) # %mul_48 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_49, %unsqueeze_146), kwargs = {}) # %sum_51 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_48, [-1]), kwargs = {}) # %mul_49 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_50, %unsqueeze_149), kwargs = {}) # %sum_52 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_49, [-1]), kwargs = {}) # %mul_50 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_51, %unsqueeze_152), kwargs = {}) # %sum_53 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_50, [-1]), kwargs = {}) # %mul_51 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_52, %unsqueeze_155), kwargs = {}) # %sum_54 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_51, [-1]), kwargs = {}) # %mul_52 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_53, %unsqueeze_158), kwargs = {}) # %sum_55 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_52, [-1]), kwargs = {}) # %mul_53 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_54, %unsqueeze_161), kwargs = {}) # %sum_56 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_53, [-1]), kwargs = {}) # %mul_54 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_55, %unsqueeze_164), kwargs = {}) # %sum_57 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_54, [-1]), kwargs = {}) # %mul_55 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_56, %unsqueeze_167), kwargs = {}) # %sum_58 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_55, [-1]), kwargs = {}) # %mul_56 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_57, %unsqueeze_170), kwargs = {}) # %sum_59 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_56, [-1]), kwargs = {}) triton_red_fused_mul_sum_4 = async_compile.triton('triton_red_fused_mul_sum_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[512, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: '*fp32', 22: '*fp32', 23: '*fp32', 24: '*fp32', 25: '*fp32', 26: '*fp32', 27: '*fp32', 28: '*fp32', 29: '*fp32', 30: '*fp32', 31: '*fp32', 32: '*fp32', 33: '*fp32', 34: '*fp32', 35: '*fp32', 36: '*fp32', 37: '*fp32', 38: '*fp32', 39: '*fp32', 40: '*fp32', 41: '*fp32', 42: '*fp32', 43: '*fp32', 44: '*fp32', 45: '*fp32', 46: '*fp32', 47: '*fp32', 48: '*fp32', 49: '*fp32', 50: '*fp32', 51: '*fp32', 52: '*fp32', 53: '*fp32', 54: '*fp32', 55: '*fp32', 56: '*fp32', 57: '*fp32', 58: '*fp32', 59: 'i32', 60: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_mul_sum_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 58, 'num_reduction': 28, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_mul_sum_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, 
in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 512 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x3 = xindex x1 = (xindex // 128) _tmp9 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp18 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp27 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp36 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp45 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp54 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp63 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp72 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp81 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp90 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp99 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp108 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp117 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp126 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp135 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp144 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp153 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp162 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp171 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp180 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp189 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp198 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp207 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp216 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp225 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp234 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp243 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp252 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr1 + (118784 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tl.load(in_ptr2 + (r2 + (4096*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp5 = tl.load(in_ptr3 + (r2 + (4096*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr4 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp12 = tl.load(in_ptr1 + (122880 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.load(in_ptr5 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp21 = tl.load(in_ptr1 + (126976 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp29 = tl.load(in_ptr6 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp30 = tl.load(in_ptr1 + (131072 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp38 = tl.load(in_ptr7 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp39 = tl.load(in_ptr1 + (135168 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp47 = tl.load(in_ptr8 + (r2 + 
(4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp48 = tl.load(in_ptr1 + (139264 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp56 = tl.load(in_ptr9 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp57 = tl.load(in_ptr1 + (143360 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp65 = tl.load(in_ptr10 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp66 = tl.load(in_ptr1 + (147456 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp74 = tl.load(in_ptr11 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp75 = tl.load(in_ptr1 + (151552 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp83 = tl.load(in_ptr12 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp84 = tl.load(in_ptr1 + (155648 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp92 = tl.load(in_ptr13 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp93 = tl.load(in_ptr1 + (159744 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp101 = tl.load(in_ptr14 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp102 = tl.load(in_ptr1 + (163840 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp110 = tl.load(in_ptr15 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp111 = tl.load(in_ptr1 + (167936 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp119 = tl.load(in_ptr16 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp120 = tl.load(in_ptr1 + (172032 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp128 = tl.load(in_ptr17 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp129 = tl.load(in_ptr1 + (176128 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp137 = tl.load(in_ptr18 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp138 = tl.load(in_ptr1 + (180224 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp146 = tl.load(in_ptr19 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp147 = tl.load(in_ptr1 + (184320 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp155 = tl.load(in_ptr20 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp156 = tl.load(in_ptr1 + (188416 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp164 = tl.load(in_ptr21 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp165 = tl.load(in_ptr1 + (192512 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp173 = tl.load(in_ptr22 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp174 = tl.load(in_ptr1 + (196608 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp182 = tl.load(in_ptr23 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp183 = tl.load(in_ptr1 + (200704 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp191 = tl.load(in_ptr24 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp192 = tl.load(in_ptr1 + (204800 + r2 + (262144*x1)), 
rmask & xmask, eviction_policy='evict_last', other=0.0) tmp200 = tl.load(in_ptr25 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp201 = tl.load(in_ptr1 + (208896 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp209 = tl.load(in_ptr26 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp210 = tl.load(in_ptr1 + (212992 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp218 = tl.load(in_ptr27 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp219 = tl.load(in_ptr1 + (217088 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp227 = tl.load(in_ptr28 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp228 = tl.load(in_ptr1 + (221184 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp236 = tl.load(in_ptr29 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp237 = tl.load(in_ptr1 + (225280 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp245 = tl.load(in_ptr30 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp246 = tl.load(in_ptr1 + (229376 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp3 = tmp1 - tmp2 tmp4 = tl_math.exp(tmp3) tmp6 = tmp4 / tmp5 tmp7 = tmp0 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = _tmp9 + tmp8 _tmp9 = tl.where(rmask & xmask, tmp10, _tmp9) tmp13 = tmp12 - tmp2 tmp14 = tl_math.exp(tmp13) tmp15 = tmp14 / tmp5 tmp16 = tmp11 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = _tmp18 + tmp17 _tmp18 = tl.where(rmask & xmask, tmp19, _tmp18) tmp22 = tmp21 - tmp2 tmp23 = tl_math.exp(tmp22) tmp24 = tmp23 / tmp5 tmp25 = tmp20 * tmp24 tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp28 = _tmp27 + tmp26 _tmp27 = tl.where(rmask & xmask, tmp28, _tmp27) tmp31 = tmp30 - tmp2 tmp32 = tl_math.exp(tmp31) tmp33 = tmp32 / tmp5 tmp34 = tmp29 * tmp33 tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK]) tmp37 = _tmp36 + tmp35 _tmp36 = tl.where(rmask & xmask, tmp37, _tmp36) tmp40 = tmp39 - tmp2 tmp41 = tl_math.exp(tmp40) tmp42 = tmp41 / tmp5 tmp43 = tmp38 * tmp42 tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK]) tmp46 = _tmp45 + tmp44 _tmp45 = tl.where(rmask & xmask, tmp46, _tmp45) tmp49 = tmp48 - tmp2 tmp50 = tl_math.exp(tmp49) tmp51 = tmp50 / tmp5 tmp52 = tmp47 * tmp51 tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK]) tmp55 = _tmp54 + tmp53 _tmp54 = tl.where(rmask & xmask, tmp55, _tmp54) tmp58 = tmp57 - tmp2 tmp59 = tl_math.exp(tmp58) tmp60 = tmp59 / tmp5 tmp61 = tmp56 * tmp60 tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK]) tmp64 = _tmp63 + tmp62 _tmp63 = tl.where(rmask & xmask, tmp64, _tmp63) tmp67 = tmp66 - tmp2 tmp68 = tl_math.exp(tmp67) tmp69 = tmp68 / tmp5 tmp70 = tmp65 * tmp69 tmp71 = tl.broadcast_to(tmp70, [XBLOCK, RBLOCK]) tmp73 = _tmp72 + tmp71 _tmp72 = tl.where(rmask & xmask, tmp73, _tmp72) tmp76 = tmp75 - tmp2 tmp77 = tl_math.exp(tmp76) tmp78 = tmp77 / tmp5 tmp79 = tmp74 * tmp78 tmp80 = tl.broadcast_to(tmp79, [XBLOCK, RBLOCK]) tmp82 = _tmp81 + tmp80 _tmp81 = tl.where(rmask & xmask, tmp82, _tmp81) tmp85 = tmp84 - tmp2 tmp86 = tl_math.exp(tmp85) tmp87 = tmp86 / tmp5 tmp88 = tmp83 * tmp87 tmp89 = tl.broadcast_to(tmp88, [XBLOCK, RBLOCK]) tmp91 = _tmp90 + tmp89 _tmp90 = tl.where(rmask & xmask, tmp91, _tmp90) tmp94 = tmp93 - tmp2 tmp95 = tl_math.exp(tmp94) tmp96 = tmp95 / tmp5 tmp97 = tmp92 * tmp96 tmp98 = 
tl.broadcast_to(tmp97, [XBLOCK, RBLOCK]) tmp100 = _tmp99 + tmp98 _tmp99 = tl.where(rmask & xmask, tmp100, _tmp99) tmp103 = tmp102 - tmp2 tmp104 = tl_math.exp(tmp103) tmp105 = tmp104 / tmp5 tmp106 = tmp101 * tmp105 tmp107 = tl.broadcast_to(tmp106, [XBLOCK, RBLOCK]) tmp109 = _tmp108 + tmp107 _tmp108 = tl.where(rmask & xmask, tmp109, _tmp108) tmp112 = tmp111 - tmp2 tmp113 = tl_math.exp(tmp112) tmp114 = tmp113 / tmp5 tmp115 = tmp110 * tmp114 tmp116 = tl.broadcast_to(tmp115, [XBLOCK, RBLOCK]) tmp118 = _tmp117 + tmp116 _tmp117 = tl.where(rmask & xmask, tmp118, _tmp117) tmp121 = tmp120 - tmp2 tmp122 = tl_math.exp(tmp121) tmp123 = tmp122 / tmp5 tmp124 = tmp119 * tmp123 tmp125 = tl.broadcast_to(tmp124, [XBLOCK, RBLOCK]) tmp127 = _tmp126 + tmp125 _tmp126 = tl.where(rmask & xmask, tmp127, _tmp126) tmp130 = tmp129 - tmp2 tmp131 = tl_math.exp(tmp130) tmp132 = tmp131 / tmp5 tmp133 = tmp128 * tmp132 tmp134 = tl.broadcast_to(tmp133, [XBLOCK, RBLOCK]) tmp136 = _tmp135 + tmp134 _tmp135 = tl.where(rmask & xmask, tmp136, _tmp135) tmp139 = tmp138 - tmp2 tmp140 = tl_math.exp(tmp139) tmp141 = tmp140 / tmp5 tmp142 = tmp137 * tmp141 tmp143 = tl.broadcast_to(tmp142, [XBLOCK, RBLOCK]) tmp145 = _tmp144 + tmp143 _tmp144 = tl.where(rmask & xmask, tmp145, _tmp144) tmp148 = tmp147 - tmp2 tmp149 = tl_math.exp(tmp148) tmp150 = tmp149 / tmp5 tmp151 = tmp146 * tmp150 tmp152 = tl.broadcast_to(tmp151, [XBLOCK, RBLOCK]) tmp154 = _tmp153 + tmp152 _tmp153 = tl.where(rmask & xmask, tmp154, _tmp153) tmp157 = tmp156 - tmp2 tmp158 = tl_math.exp(tmp157) tmp159 = tmp158 / tmp5 tmp160 = tmp155 * tmp159 tmp161 = tl.broadcast_to(tmp160, [XBLOCK, RBLOCK]) tmp163 = _tmp162 + tmp161 _tmp162 = tl.where(rmask & xmask, tmp163, _tmp162) tmp166 = tmp165 - tmp2 tmp167 = tl_math.exp(tmp166) tmp168 = tmp167 / tmp5 tmp169 = tmp164 * tmp168 tmp170 = tl.broadcast_to(tmp169, [XBLOCK, RBLOCK]) tmp172 = _tmp171 + tmp170 _tmp171 = tl.where(rmask & xmask, tmp172, _tmp171) tmp175 = tmp174 - tmp2 tmp176 = tl_math.exp(tmp175) tmp177 = tmp176 / tmp5 tmp178 = tmp173 * tmp177 tmp179 = tl.broadcast_to(tmp178, [XBLOCK, RBLOCK]) tmp181 = _tmp180 + tmp179 _tmp180 = tl.where(rmask & xmask, tmp181, _tmp180) tmp184 = tmp183 - tmp2 tmp185 = tl_math.exp(tmp184) tmp186 = tmp185 / tmp5 tmp187 = tmp182 * tmp186 tmp188 = tl.broadcast_to(tmp187, [XBLOCK, RBLOCK]) tmp190 = _tmp189 + tmp188 _tmp189 = tl.where(rmask & xmask, tmp190, _tmp189) tmp193 = tmp192 - tmp2 tmp194 = tl_math.exp(tmp193) tmp195 = tmp194 / tmp5 tmp196 = tmp191 * tmp195 tmp197 = tl.broadcast_to(tmp196, [XBLOCK, RBLOCK]) tmp199 = _tmp198 + tmp197 _tmp198 = tl.where(rmask & xmask, tmp199, _tmp198) tmp202 = tmp201 - tmp2 tmp203 = tl_math.exp(tmp202) tmp204 = tmp203 / tmp5 tmp205 = tmp200 * tmp204 tmp206 = tl.broadcast_to(tmp205, [XBLOCK, RBLOCK]) tmp208 = _tmp207 + tmp206 _tmp207 = tl.where(rmask & xmask, tmp208, _tmp207) tmp211 = tmp210 - tmp2 tmp212 = tl_math.exp(tmp211) tmp213 = tmp212 / tmp5 tmp214 = tmp209 * tmp213 tmp215 = tl.broadcast_to(tmp214, [XBLOCK, RBLOCK]) tmp217 = _tmp216 + tmp215 _tmp216 = tl.where(rmask & xmask, tmp217, _tmp216) tmp220 = tmp219 - tmp2 tmp221 = tl_math.exp(tmp220) tmp222 = tmp221 / tmp5 tmp223 = tmp218 * tmp222 tmp224 = tl.broadcast_to(tmp223, [XBLOCK, RBLOCK]) tmp226 = _tmp225 + tmp224 _tmp225 = tl.where(rmask & xmask, tmp226, _tmp225) tmp229 = tmp228 - tmp2 tmp230 = tl_math.exp(tmp229) tmp231 = tmp230 / tmp5 tmp232 = tmp227 * tmp231 tmp233 = tl.broadcast_to(tmp232, [XBLOCK, RBLOCK]) tmp235 = _tmp234 + tmp233 _tmp234 = tl.where(rmask & xmask, tmp235, _tmp234) tmp238 = tmp237 - 
tmp2 tmp239 = tl_math.exp(tmp238) tmp240 = tmp239 / tmp5 tmp241 = tmp236 * tmp240 tmp242 = tl.broadcast_to(tmp241, [XBLOCK, RBLOCK]) tmp244 = _tmp243 + tmp242 _tmp243 = tl.where(rmask & xmask, tmp244, _tmp243) tmp247 = tmp246 - tmp2 tmp248 = tl_math.exp(tmp247) tmp249 = tmp248 / tmp5 tmp250 = tmp245 * tmp249 tmp251 = tl.broadcast_to(tmp250, [XBLOCK, RBLOCK]) tmp253 = _tmp252 + tmp251 _tmp252 = tl.where(rmask & xmask, tmp253, _tmp252) tmp9 = tl.sum(_tmp9, 1)[:, None] tl.store(out_ptr0 + (x3), tmp9, xmask) tmp18 = tl.sum(_tmp18, 1)[:, None] tl.store(out_ptr1 + (x3), tmp18, xmask) tmp27 = tl.sum(_tmp27, 1)[:, None] tl.store(out_ptr2 + (x3), tmp27, xmask) tmp36 = tl.sum(_tmp36, 1)[:, None] tl.store(out_ptr3 + (x3), tmp36, xmask) tmp45 = tl.sum(_tmp45, 1)[:, None] tl.store(out_ptr4 + (x3), tmp45, xmask) tmp54 = tl.sum(_tmp54, 1)[:, None] tl.store(out_ptr5 + (x3), tmp54, xmask) tmp63 = tl.sum(_tmp63, 1)[:, None] tl.store(out_ptr6 + (x3), tmp63, xmask) tmp72 = tl.sum(_tmp72, 1)[:, None] tl.store(out_ptr7 + (x3), tmp72, xmask) tmp81 = tl.sum(_tmp81, 1)[:, None] tl.store(out_ptr8 + (x3), tmp81, xmask) tmp90 = tl.sum(_tmp90, 1)[:, None] tl.store(out_ptr9 + (x3), tmp90, xmask) tmp99 = tl.sum(_tmp99, 1)[:, None] tl.store(out_ptr10 + (x3), tmp99, xmask) tmp108 = tl.sum(_tmp108, 1)[:, None] tl.store(out_ptr11 + (x3), tmp108, xmask) tmp117 = tl.sum(_tmp117, 1)[:, None] tl.store(out_ptr12 + (x3), tmp117, xmask) tmp126 = tl.sum(_tmp126, 1)[:, None] tl.store(out_ptr13 + (x3), tmp126, xmask) tmp135 = tl.sum(_tmp135, 1)[:, None] tl.store(out_ptr14 + (x3), tmp135, xmask) tmp144 = tl.sum(_tmp144, 1)[:, None] tl.store(out_ptr15 + (x3), tmp144, xmask) tmp153 = tl.sum(_tmp153, 1)[:, None] tl.store(out_ptr16 + (x3), tmp153, xmask) tmp162 = tl.sum(_tmp162, 1)[:, None] tl.store(out_ptr17 + (x3), tmp162, xmask) tmp171 = tl.sum(_tmp171, 1)[:, None] tl.store(out_ptr18 + (x3), tmp171, xmask) tmp180 = tl.sum(_tmp180, 1)[:, None] tl.store(out_ptr19 + (x3), tmp180, xmask) tmp189 = tl.sum(_tmp189, 1)[:, None] tl.store(out_ptr20 + (x3), tmp189, xmask) tmp198 = tl.sum(_tmp198, 1)[:, None] tl.store(out_ptr21 + (x3), tmp198, xmask) tmp207 = tl.sum(_tmp207, 1)[:, None] tl.store(out_ptr22 + (x3), tmp207, xmask) tmp216 = tl.sum(_tmp216, 1)[:, None] tl.store(out_ptr23 + (x3), tmp216, xmask) tmp225 = tl.sum(_tmp225, 1)[:, None] tl.store(out_ptr24 + (x3), tmp225, xmask) tmp234 = tl.sum(_tmp234, 1)[:, None] tl.store(out_ptr25 + (x3), tmp234, xmask) tmp243 = tl.sum(_tmp243, 1)[:, None] tl.store(out_ptr26 + (x3), tmp243, xmask) tmp252 = tl.sum(_tmp252, 1)[:, None] tl.store(out_ptr27 + (x3), tmp252, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/m5/cm5hatbwewijgqsezz7mpghb6gtaqevomtlc673msign42fqnq42.py # Topologically Sorted Source Nodes: [residual_115, sum_58, residual_117, sum_59, residual_119, sum_60, residual_121, sum_61, residual_123, sum_62, residual_125, sum_63, residual_127, sum_64], Original ATen: [aten.mul, aten.sum] # Source node to ATen node mapping: # residual_115 => mul_57 # residual_117 => mul_58 # residual_119 => mul_59 # residual_121 => mul_60 # residual_123 => mul_61 # residual_125 => mul_62 # residual_127 => mul_63 # sum_58 => sum_60 # sum_59 => sum_61 # sum_60 => sum_62 # sum_61 => sum_63 # sum_62 => sum_64 # sum_63 => sum_65 # sum_64 => sum_66 # Graph fragment: # %mul_57 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_58, %unsqueeze_173), kwargs = {}) # %sum_60 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_57, [-1]), 
kwargs = {}) # %mul_58 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_59, %unsqueeze_176), kwargs = {}) # %sum_61 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_58, [-1]), kwargs = {}) # %mul_59 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_60, %unsqueeze_179), kwargs = {}) # %sum_62 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_59, [-1]), kwargs = {}) # %mul_60 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_61, %unsqueeze_182), kwargs = {}) # %sum_63 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_60, [-1]), kwargs = {}) # %mul_61 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_62, %unsqueeze_185), kwargs = {}) # %sum_64 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_61, [-1]), kwargs = {}) # %mul_62 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_63, %unsqueeze_188), kwargs = {}) # %sum_65 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_62, [-1]), kwargs = {}) # %mul_63 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_64, %unsqueeze_191), kwargs = {}) # %sum_66 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_63, [-1]), kwargs = {}) triton_red_fused_mul_sum_5 = async_compile.triton('triton_red_fused_mul_sum_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[512, 4096], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: 'i32', 18: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_mul_sum_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 7, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_mul_sum_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 512 rnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x3 = xindex x1 = (xindex // 128) _tmp9 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp18 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp27 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp36 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp45 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp54 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp63 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.load(in_ptr1 + (233472 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = tl.load(in_ptr2 + (r2 + (4096*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp5 = tl.load(in_ptr3 + (r2 + (4096*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tl.load(in_ptr4 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp12 = tl.load(in_ptr1 + (237568 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.load(in_ptr5 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp21 = tl.load(in_ptr1 + (241664 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp29 = tl.load(in_ptr6 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp30 = tl.load(in_ptr1 + (245760 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp38 = tl.load(in_ptr7 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp39 = tl.load(in_ptr1 + (249856 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp47 = tl.load(in_ptr8 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp48 = tl.load(in_ptr1 + (253952 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp56 = tl.load(in_ptr9 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp57 = tl.load(in_ptr1 + (258048 + r2 + (262144*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp3 = tmp1 - tmp2 tmp4 = tl_math.exp(tmp3) tmp6 = tmp4 / tmp5 tmp7 = tmp0 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = _tmp9 + tmp8 _tmp9 = tl.where(rmask & xmask, tmp10, _tmp9) tmp13 = tmp12 - tmp2 tmp14 = tl_math.exp(tmp13) tmp15 = tmp14 / tmp5 tmp16 = tmp11 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = _tmp18 + tmp17 _tmp18 = tl.where(rmask & xmask, tmp19, _tmp18) tmp22 = tmp21 - tmp2 tmp23 = tl_math.exp(tmp22) tmp24 = tmp23 / tmp5 tmp25 = tmp20 * tmp24 tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK]) tmp28 = _tmp27 + tmp26 _tmp27 = tl.where(rmask & xmask, tmp28, _tmp27) tmp31 = tmp30 - tmp2 tmp32 = tl_math.exp(tmp31) tmp33 = tmp32 / tmp5 tmp34 = tmp29 * tmp33 tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK]) tmp37 = _tmp36 + tmp35 _tmp36 = tl.where(rmask & xmask, tmp37, _tmp36) tmp40 = tmp39 - tmp2 tmp41 = tl_math.exp(tmp40) tmp42 = tmp41 / tmp5 tmp43 = tmp38 * tmp42 tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK]) tmp46 = _tmp45 + tmp44 _tmp45 = tl.where(rmask & xmask, tmp46, _tmp45) tmp49 = tmp48 - tmp2 tmp50 = tl_math.exp(tmp49) tmp51 = tmp50 / tmp5 tmp52 = tmp47 * tmp51 tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK]) tmp55 = _tmp54 + tmp53 _tmp54 = tl.where(rmask & xmask, tmp55, _tmp54) tmp58 = tmp57 - tmp2 tmp59 = tl_math.exp(tmp58) 
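# NOTE (editor): same softmax-weighted accumulation as the wider kernels above,
# specialized to the final 7 of the 64 clusters: the in_ptr1 offsets
# 233472..258048 in steps of 4096 index cluster rows 57..63 of a (64, 4096)
# per-image score tensor (233472 / 4096 = 57, 258048 / 4096 = 63).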
tmp60 = tmp59 / tmp5 tmp61 = tmp56 * tmp60 tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK]) tmp64 = _tmp63 + tmp62 _tmp63 = tl.where(rmask & xmask, tmp64, _tmp63) tmp9 = tl.sum(_tmp9, 1)[:, None] tl.store(out_ptr0 + (x3), tmp9, xmask) tmp18 = tl.sum(_tmp18, 1)[:, None] tl.store(out_ptr1 + (x3), tmp18, xmask) tmp27 = tl.sum(_tmp27, 1)[:, None] tl.store(out_ptr2 + (x3), tmp27, xmask) tmp36 = tl.sum(_tmp36, 1)[:, None] tl.store(out_ptr3 + (x3), tmp36, xmask) tmp45 = tl.sum(_tmp45, 1)[:, None] tl.store(out_ptr4 + (x3), tmp45, xmask) tmp54 = tl.sum(_tmp54, 1)[:, None] tl.store(out_ptr5 + (x3), tmp54, xmask) tmp63 = tl.sum(_tmp63, 1)[:, None] tl.store(out_ptr6 + (x3), tmp63, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ew/cewrqjskzvue6r2kcna4men3gingd3ajhrksiyyptwu2fliqalf7.py # Topologically Sorted Source Nodes: [vlad, setitem, setitem_1, setitem_2, setitem_3, setitem_4, setitem_5, setitem_6, setitem_7, setitem_8, setitem_9, setitem_10, setitem_11, setitem_12, setitem_13, setitem_14, setitem_15, setitem_16, setitem_17, setitem_18, setitem_19, setitem_20, setitem_21, setitem_22, setitem_23, setitem_24, setitem_25, setitem_26, setitem_27, setitem_28, setitem_29, setitem_30, setitem_31, setitem_32, setitem_33, setitem_34, setitem_35, setitem_36, setitem_37, setitem_38, setitem_39, setitem_40, setitem_41, setitem_42, setitem_43, setitem_44, setitem_45, setitem_46, setitem_47, setitem_48, setitem_49, setitem_50, setitem_51, setitem_52, setitem_53, setitem_54, setitem_55, setitem_56, setitem_57, setitem_58, setitem_59, setitem_60, setitem_61, setitem_62, setitem_63, vlad_1], Original ATen: [aten.zeros, aten.copy, aten.linalg_vector_norm] # Source node to ATen node mapping: # setitem => copy # setitem_1 => copy_1 # setitem_10 => copy_10 # setitem_11 => copy_11 # setitem_12 => copy_12 # setitem_13 => copy_13 # setitem_14 => copy_14 # setitem_15 => copy_15 # setitem_16 => copy_16 # setitem_17 => copy_17 # setitem_18 => copy_18 # setitem_19 => copy_19 # setitem_2 => copy_2 # setitem_20 => copy_20 # setitem_21 => copy_21 # setitem_22 => copy_22 # setitem_23 => copy_23 # setitem_24 => copy_24 # setitem_25 => copy_25 # setitem_26 => copy_26 # setitem_27 => copy_27 # setitem_28 => copy_28 # setitem_29 => copy_29 # setitem_3 => copy_3 # setitem_30 => copy_30 # setitem_31 => copy_31 # setitem_32 => copy_32 # setitem_33 => copy_33 # setitem_34 => copy_34 # setitem_35 => copy_35 # setitem_36 => copy_36 # setitem_37 => copy_37 # setitem_38 => copy_38 # setitem_39 => copy_39 # setitem_4 => copy_4 # setitem_40 => copy_40 # setitem_41 => copy_41 # setitem_42 => copy_42 # setitem_43 => copy_43 # setitem_44 => copy_44 # setitem_45 => copy_45 # setitem_46 => copy_46 # setitem_47 => copy_47 # setitem_48 => copy_48 # setitem_49 => copy_49 # setitem_5 => copy_5 # setitem_50 => copy_50 # setitem_51 => copy_51 # setitem_52 => copy_52 # setitem_53 => copy_53 # setitem_54 => copy_54 # setitem_55 => copy_55 # setitem_56 => copy_56 # setitem_57 => copy_57 # setitem_58 => copy_58 # setitem_59 => copy_59 # setitem_6 => copy_6 # setitem_60 => copy_60 # setitem_61 => copy_61 # setitem_62 => copy_62 # setitem_63 => copy_63 # setitem_7 => copy_7 # setitem_8 => copy_8 # setitem_9 => copy_9 # vlad => full # vlad_1 => pow_3, pow_4, sum_67 # Graph fragment: # %full : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 64, 128], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %copy : [num_users=1] = 
call_function[target=torch.ops.aten.copy.default](args = (%slice_7, %sum_3), kwargs = {}) # %slice_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%full, %copy, 1, 0, 1), kwargs = {}) # %copy_1 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_26, %sum_4), kwargs = {}) # %slice_scatter_default_1 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default, %copy_1, 1, 1, 2), kwargs = {}) # %copy_2 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_45, %sum_5), kwargs = {}) # %slice_scatter_default_2 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_1, %copy_2, 1, 2, 3), kwargs = {}) # %copy_3 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_64, %sum_6), kwargs = {}) # %slice_scatter_default_3 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_2, %copy_3, 1, 3, 4), kwargs = {}) # %copy_4 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_83, %sum_7), kwargs = {}) # %slice_scatter_default_4 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_3, %copy_4, 1, 4, 5), kwargs = {}) # %copy_5 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_102, %sum_8), kwargs = {}) # %slice_scatter_default_5 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_4, %copy_5, 1, 5, 6), kwargs = {}) # %copy_6 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_121, %sum_9), kwargs = {}) # %slice_scatter_default_6 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_5, %copy_6, 1, 6, 7), kwargs = {}) # %copy_7 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_140, %sum_10), kwargs = {}) # %slice_scatter_default_7 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_6, %copy_7, 1, 7, 8), kwargs = {}) # %copy_8 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_159, %sum_11), kwargs = {}) # %slice_scatter_default_8 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_7, %copy_8, 1, 8, 9), kwargs = {}) # %copy_9 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_178, %sum_12), kwargs = {}) # %slice_scatter_default_9 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_8, %copy_9, 1, 9, 10), kwargs = {}) # %copy_10 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_197, %sum_13), kwargs = {}) # %slice_scatter_default_10 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_9, %copy_10, 1, 10, 11), kwargs = {}) # %copy_11 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_216, %sum_14), kwargs = {}) # %slice_scatter_default_11 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_10, %copy_11, 1, 11, 12), kwargs = {}) # %copy_12 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_235, %sum_15), kwargs 
= {}) # %slice_scatter_default_12 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_11, %copy_12, 1, 12, 13), kwargs = {}) # %copy_13 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_254, %sum_16), kwargs = {}) # %slice_scatter_default_13 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_12, %copy_13, 1, 13, 14), kwargs = {}) # %copy_14 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_273, %sum_17), kwargs = {}) # %slice_scatter_default_14 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_13, %copy_14, 1, 14, 15), kwargs = {}) # %copy_15 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_292, %sum_18), kwargs = {}) # %slice_scatter_default_15 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_14, %copy_15, 1, 15, 16), kwargs = {}) # %copy_16 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_311, %sum_19), kwargs = {}) # %slice_scatter_default_16 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_15, %copy_16, 1, 16, 17), kwargs = {}) # %copy_17 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_330, %sum_20), kwargs = {}) # %slice_scatter_default_17 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_16, %copy_17, 1, 17, 18), kwargs = {}) # %copy_18 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_349, %sum_21), kwargs = {}) # %slice_scatter_default_18 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_17, %copy_18, 1, 18, 19), kwargs = {}) # %copy_19 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_368, %sum_22), kwargs = {}) # %slice_scatter_default_19 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_18, %copy_19, 1, 19, 20), kwargs = {}) # %copy_20 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_387, %sum_23), kwargs = {}) # %slice_scatter_default_20 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_19, %copy_20, 1, 20, 21), kwargs = {}) # %copy_21 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_406, %sum_24), kwargs = {}) # %slice_scatter_default_21 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_20, %copy_21, 1, 21, 22), kwargs = {}) # %copy_22 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_425, %sum_25), kwargs = {}) # %slice_scatter_default_22 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_21, %copy_22, 1, 22, 23), kwargs = {}) # %copy_23 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_444, %sum_26), kwargs = {}) # %slice_scatter_default_23 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_22, %copy_23, 1, 23, 24), kwargs = {}) # %copy_24 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_463, 
%sum_27), kwargs = {}) # %slice_scatter_default_24 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_23, %copy_24, 1, 24, 25), kwargs = {}) # %copy_25 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_482, %sum_28), kwargs = {}) # %slice_scatter_default_25 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_24, %copy_25, 1, 25, 26), kwargs = {}) # %copy_26 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_501, %sum_29), kwargs = {}) # %slice_scatter_default_26 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_25, %copy_26, 1, 26, 27), kwargs = {}) # %copy_27 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_520, %sum_30), kwargs = {}) # %slice_scatter_default_27 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_26, %copy_27, 1, 27, 28), kwargs = {}) # %copy_28 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_539, %sum_31), kwargs = {}) # %slice_scatter_default_28 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_27, %copy_28, 1, 28, 29), kwargs = {}) # %copy_29 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_558, %sum_32), kwargs = {}) # %slice_scatter_default_29 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_28, %copy_29, 1, 29, 30), kwargs = {}) # %copy_30 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_577, %sum_33), kwargs = {}) # %slice_scatter_default_30 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_29, %copy_30, 1, 30, 31), kwargs = {}) # %copy_31 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_596, %sum_34), kwargs = {}) # %slice_scatter_default_31 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_30, %copy_31, 1, 31, 32), kwargs = {}) # %copy_32 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_615, %sum_35), kwargs = {}) # %slice_scatter_default_32 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_31, %copy_32, 1, 32, 33), kwargs = {}) # %copy_33 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_634, %sum_36), kwargs = {}) # %slice_scatter_default_33 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_32, %copy_33, 1, 33, 34), kwargs = {}) # %copy_34 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_653, %sum_37), kwargs = {}) # %slice_scatter_default_34 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_33, %copy_34, 1, 34, 35), kwargs = {}) # %copy_35 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_672, %sum_38), kwargs = {}) # %slice_scatter_default_35 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_34, %copy_35, 1, 35, 36), kwargs = {}) # %copy_36 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = 
(%slice_691, %sum_39), kwargs = {}) # %slice_scatter_default_36 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_35, %copy_36, 1, 36, 37), kwargs = {}) # %copy_37 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_710, %sum_40), kwargs = {}) # %slice_scatter_default_37 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_36, %copy_37, 1, 37, 38), kwargs = {}) # %copy_38 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_729, %sum_41), kwargs = {}) # %slice_scatter_default_38 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_37, %copy_38, 1, 38, 39), kwargs = {}) # %copy_39 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_748, %sum_42), kwargs = {}) # %slice_scatter_default_39 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_38, %copy_39, 1, 39, 40), kwargs = {}) # %copy_40 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_767, %sum_43), kwargs = {}) # %slice_scatter_default_40 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_39, %copy_40, 1, 40, 41), kwargs = {}) # %copy_41 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_786, %sum_44), kwargs = {}) # %slice_scatter_default_41 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_40, %copy_41, 1, 41, 42), kwargs = {}) # %copy_42 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_805, %sum_45), kwargs = {}) # %slice_scatter_default_42 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_41, %copy_42, 1, 42, 43), kwargs = {}) # %copy_43 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_824, %sum_46), kwargs = {}) # %slice_scatter_default_43 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_42, %copy_43, 1, 43, 44), kwargs = {}) # %copy_44 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_843, %sum_47), kwargs = {}) # %slice_scatter_default_44 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_43, %copy_44, 1, 44, 45), kwargs = {}) # %copy_45 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_862, %sum_48), kwargs = {}) # %slice_scatter_default_45 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_44, %copy_45, 1, 45, 46), kwargs = {}) # %copy_46 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_881, %sum_49), kwargs = {}) # %slice_scatter_default_46 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_45, %copy_46, 1, 46, 47), kwargs = {}) # %copy_47 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_900, %sum_50), kwargs = {}) # %slice_scatter_default_47 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_46, %copy_47, 1, 47, 48), kwargs = {}) # %copy_48 : [num_users=1] = 
call_function[target=torch.ops.aten.copy.default](args = (%slice_919, %sum_51), kwargs = {}) # %slice_scatter_default_48 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_47, %copy_48, 1, 48, 49), kwargs = {}) # %copy_49 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_938, %sum_52), kwargs = {}) # %slice_scatter_default_49 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_48, %copy_49, 1, 49, 50), kwargs = {}) # %copy_50 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_957, %sum_53), kwargs = {}) # %slice_scatter_default_50 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_49, %copy_50, 1, 50, 51), kwargs = {}) # %copy_51 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_976, %sum_54), kwargs = {}) # %slice_scatter_default_51 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_50, %copy_51, 1, 51, 52), kwargs = {}) # %copy_52 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_995, %sum_55), kwargs = {}) # %slice_scatter_default_52 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_51, %copy_52, 1, 52, 53), kwargs = {}) # %copy_53 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1014, %sum_56), kwargs = {}) # %slice_scatter_default_53 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_52, %copy_53, 1, 53, 54), kwargs = {}) # %copy_54 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1033, %sum_57), kwargs = {}) # %slice_scatter_default_54 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_53, %copy_54, 1, 54, 55), kwargs = {}) # %copy_55 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1052, %sum_58), kwargs = {}) # %slice_scatter_default_55 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_54, %copy_55, 1, 55, 56), kwargs = {}) # %copy_56 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1071, %sum_59), kwargs = {}) # %slice_scatter_default_56 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_55, %copy_56, 1, 56, 57), kwargs = {}) # %copy_57 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1090, %sum_60), kwargs = {}) # %slice_scatter_default_57 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_56, %copy_57, 1, 57, 58), kwargs = {}) # %copy_58 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1109, %sum_61), kwargs = {}) # %slice_scatter_default_58 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_57, %copy_58, 1, 58, 59), kwargs = {}) # %copy_59 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1128, %sum_62), kwargs = {}) # %slice_scatter_default_59 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_58, %copy_59, 1, 59, 60), kwargs = {}) # %copy_60 : 
[num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1147, %sum_63), kwargs = {}) # %slice_scatter_default_60 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_59, %copy_60, 1, 60, 61), kwargs = {}) # %copy_61 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1166, %sum_64), kwargs = {}) # %slice_scatter_default_61 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_60, %copy_61, 1, 61, 62), kwargs = {}) # %copy_62 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1185, %sum_65), kwargs = {}) # %slice_scatter_default_62 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_61, %copy_62, 1, 62, 63), kwargs = {}) # %copy_63 : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%slice_1204, %sum_66), kwargs = {}) # %slice_scatter_default_63 : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_62, %copy_63, 1, 63, 64), kwargs = {}) # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%slice_scatter_default_63, 2), kwargs = {}) # %sum_67 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [2], True), kwargs = {}) # %pow_4 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_67, 0.5), kwargs = {}) triton_per_fused_copy_linalg_vector_norm_zeros_6 = async_compile.triton('triton_per_fused_copy_linalg_vector_norm_zeros_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[256, 128], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: '*fp32', 22: '*fp32', 23: '*fp32', 24: '*fp32', 25: '*fp32', 26: '*fp32', 27: '*fp32', 28: '*fp32', 29: '*fp32', 30: '*fp32', 31: '*fp32', 32: '*fp32', 33: '*fp32', 34: '*fp32', 35: '*fp32', 36: '*fp32', 37: '*fp32', 38: '*fp32', 39: '*fp32', 40: '*fp32', 41: '*fp32', 42: '*fp32', 43: '*fp32', 44: '*fp32', 45: '*fp32', 46: '*fp32', 47: '*fp32', 48: '*fp32', 49: '*fp32', 50: '*fp32', 51: '*fp32', 52: '*fp32', 53: '*fp32', 54: '*fp32', 55: '*fp32', 56: '*fp32', 57: '*fp32', 58: '*fp32', 59: '*fp32', 60: '*fp32', 61: '*fp32', 62: '*fp32', 63: '*fp32', 64: '*fp32', 65: '*fp32', 66: 'i32', 67: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67), 
equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_copy_linalg_vector_norm_zeros_6', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 64, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_copy_linalg_vector_norm_zeros_6(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31, in_ptr32, in_ptr33, in_ptr34, in_ptr35, in_ptr36, in_ptr37, in_ptr38, in_ptr39, in_ptr40, in_ptr41, in_ptr42, in_ptr43, in_ptr44, in_ptr45, in_ptr46, in_ptr47, in_ptr48, in_ptr49, in_ptr50, in_ptr51, in_ptr52, in_ptr53, in_ptr54, in_ptr55, in_ptr56, in_ptr57, in_ptr58, in_ptr59, in_ptr60, in_ptr61, in_ptr62, in_ptr63, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 256 rnumel = 128 RBLOCK: tl.constexpr = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) x0 = xindex % 64 r2 = rindex x1 = (xindex // 64) x3 = xindex tmp0 = x0 tmp1 = tl.full([1, 1], 4, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1, 1], 5, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (r2 + (128*x1)), tmp5 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tl.full([1, 1], 3, tl.int64) tmp8 = tmp0 >= tmp7 tmp9 = tmp0 < tmp1 tmp10 = tmp8 & tmp9 tmp11 = tl.load(in_ptr1 + (r2 + (128*x1)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp12 = tl.full([1, 1], 2, tl.int64) tmp13 = tmp0 >= tmp12 tmp14 = tmp0 < tmp7 tmp15 = tmp13 & tmp14 tmp16 = tl.load(in_ptr2 + (r2 + (128*x1)), tmp15 & xmask, eviction_policy='evict_last', other=0.0) tmp17 = tl.full([1, 1], 1, tl.int64) tmp18 = tmp0 >= tmp17 tmp19 = tmp0 < tmp12 tmp20 = tmp18 & tmp19 tmp21 = tl.load(in_ptr3 + (r2 + (128*x1)), tmp20 & xmask, eviction_policy='evict_last', other=0.0) tmp22 = tmp0 < tmp17 tmp23 = tl.load(in_ptr4 + (r2 + (128*x1)), tmp22 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = 0.0 tmp25 = tl.where(tmp22, tmp23, tmp24) tmp26 = tl.where(tmp20, tmp21, tmp25) tmp27 = tl.where(tmp15, tmp16, tmp26) tmp28 = tl.where(tmp10, tmp11, tmp27) tmp29 = tl.where(tmp5, tmp6, tmp28) tmp30 = tl.full([1, 1], 8, tl.int64) tmp31 = tmp0 >= tmp30 tmp32 = tl.full([1, 1], 9, tl.int64) tmp33 = tmp0 < tmp32 tmp34 = tmp31 & tmp33 tmp35 = tl.load(in_ptr5 + (r2 + (128*x1)), tmp34 & xmask, eviction_policy='evict_last', other=0.0) tmp36 = tl.full([1, 1], 7, tl.int64) tmp37 = tmp0 >= tmp36 tmp38 = tmp0 < tmp30 tmp39 = tmp37 & tmp38 tmp40 = tl.load(in_ptr6 + (r2 + (128*x1)), tmp39 & xmask, eviction_policy='evict_last', other=0.0) tmp41 = tl.full([1, 1], 6, tl.int64) tmp42 = tmp0 >= tmp41 tmp43 = tmp0 < tmp36 tmp44 = tmp42 & tmp43 tmp45 = tl.load(in_ptr7 + (r2 + (128*x1)), tmp44 & xmask, eviction_policy='evict_last', 
other=0.0) tmp46 = tmp0 >= tmp3 tmp47 = tmp0 < tmp41 tmp48 = tmp46 & tmp47 tmp49 = tl.load(in_ptr8 + (r2 + (128*x1)), tmp48 & xmask, eviction_policy='evict_last', other=0.0) tmp50 = tl.where(tmp48, tmp49, tmp29) tmp51 = tl.where(tmp44, tmp45, tmp50) tmp52 = tl.where(tmp39, tmp40, tmp51) tmp53 = tl.where(tmp34, tmp35, tmp52) tmp54 = tl.full([1, 1], 12, tl.int64) tmp55 = tmp0 >= tmp54 tmp56 = tl.full([1, 1], 13, tl.int64) tmp57 = tmp0 < tmp56 tmp58 = tmp55 & tmp57 tmp59 = tl.load(in_ptr9 + (r2 + (128*x1)), tmp58 & xmask, eviction_policy='evict_last', other=0.0) tmp60 = tl.full([1, 1], 11, tl.int64) tmp61 = tmp0 >= tmp60 tmp62 = tmp0 < tmp54 tmp63 = tmp61 & tmp62 tmp64 = tl.load(in_ptr10 + (r2 + (128*x1)), tmp63 & xmask, eviction_policy='evict_last', other=0.0) tmp65 = tl.full([1, 1], 10, tl.int64) tmp66 = tmp0 >= tmp65 tmp67 = tmp0 < tmp60 tmp68 = tmp66 & tmp67 tmp69 = tl.load(in_ptr11 + (r2 + (128*x1)), tmp68 & xmask, eviction_policy='evict_last', other=0.0) tmp70 = tmp0 >= tmp32 tmp71 = tmp0 < tmp65 tmp72 = tmp70 & tmp71 tmp73 = tl.load(in_ptr12 + (r2 + (128*x1)), tmp72 & xmask, eviction_policy='evict_last', other=0.0) tmp74 = tl.where(tmp72, tmp73, tmp53) tmp75 = tl.where(tmp68, tmp69, tmp74) tmp76 = tl.where(tmp63, tmp64, tmp75) tmp77 = tl.where(tmp58, tmp59, tmp76) tmp78 = tl.full([1, 1], 16, tl.int64) tmp79 = tmp0 >= tmp78 tmp80 = tl.full([1, 1], 17, tl.int64) tmp81 = tmp0 < tmp80 tmp82 = tmp79 & tmp81 tmp83 = tl.load(in_ptr13 + (r2 + (128*x1)), tmp82 & xmask, eviction_policy='evict_last', other=0.0) tmp84 = tl.full([1, 1], 15, tl.int64) tmp85 = tmp0 >= tmp84 tmp86 = tmp0 < tmp78 tmp87 = tmp85 & tmp86 tmp88 = tl.load(in_ptr14 + (r2 + (128*x1)), tmp87 & xmask, eviction_policy='evict_last', other=0.0) tmp89 = tl.full([1, 1], 14, tl.int64) tmp90 = tmp0 >= tmp89 tmp91 = tmp0 < tmp84 tmp92 = tmp90 & tmp91 tmp93 = tl.load(in_ptr15 + (r2 + (128*x1)), tmp92 & xmask, eviction_policy='evict_last', other=0.0) tmp94 = tmp0 >= tmp56 tmp95 = tmp0 < tmp89 tmp96 = tmp94 & tmp95 tmp97 = tl.load(in_ptr16 + (r2 + (128*x1)), tmp96 & xmask, eviction_policy='evict_last', other=0.0) tmp98 = tl.where(tmp96, tmp97, tmp77) tmp99 = tl.where(tmp92, tmp93, tmp98) tmp100 = tl.where(tmp87, tmp88, tmp99) tmp101 = tl.where(tmp82, tmp83, tmp100) tmp102 = tl.full([1, 1], 20, tl.int64) tmp103 = tmp0 >= tmp102 tmp104 = tl.full([1, 1], 21, tl.int64) tmp105 = tmp0 < tmp104 tmp106 = tmp103 & tmp105 tmp107 = tl.load(in_ptr17 + (r2 + (128*x1)), tmp106 & xmask, eviction_policy='evict_last', other=0.0) tmp108 = tl.full([1, 1], 19, tl.int64) tmp109 = tmp0 >= tmp108 tmp110 = tmp0 < tmp102 tmp111 = tmp109 & tmp110 tmp112 = tl.load(in_ptr18 + (r2 + (128*x1)), tmp111 & xmask, eviction_policy='evict_last', other=0.0) tmp113 = tl.full([1, 1], 18, tl.int64) tmp114 = tmp0 >= tmp113 tmp115 = tmp0 < tmp108 tmp116 = tmp114 & tmp115 tmp117 = tl.load(in_ptr19 + (r2 + (128*x1)), tmp116 & xmask, eviction_policy='evict_last', other=0.0) tmp118 = tmp0 >= tmp80 tmp119 = tmp0 < tmp113 tmp120 = tmp118 & tmp119 tmp121 = tl.load(in_ptr20 + (r2 + (128*x1)), tmp120 & xmask, eviction_policy='evict_last', other=0.0) tmp122 = tl.where(tmp120, tmp121, tmp101) tmp123 = tl.where(tmp116, tmp117, tmp122) tmp124 = tl.where(tmp111, tmp112, tmp123) tmp125 = tl.where(tmp106, tmp107, tmp124) tmp126 = tl.full([1, 1], 24, tl.int64) tmp127 = tmp0 >= tmp126 tmp128 = tl.full([1, 1], 25, tl.int64) tmp129 = tmp0 < tmp128 tmp130 = tmp127 & tmp129 tmp131 = tl.load(in_ptr21 + (r2 + (128*x1)), tmp130 & xmask, eviction_policy='evict_last', other=0.0) tmp132 = tl.full([1, 1], 
23, tl.int64) tmp133 = tmp0 >= tmp132 tmp134 = tmp0 < tmp126 tmp135 = tmp133 & tmp134 tmp136 = tl.load(in_ptr22 + (r2 + (128*x1)), tmp135 & xmask, eviction_policy='evict_last', other=0.0) tmp137 = tl.full([1, 1], 22, tl.int64) tmp138 = tmp0 >= tmp137 tmp139 = tmp0 < tmp132 tmp140 = tmp138 & tmp139 tmp141 = tl.load(in_ptr23 + (r2 + (128*x1)), tmp140 & xmask, eviction_policy='evict_last', other=0.0) tmp142 = tmp0 >= tmp104 tmp143 = tmp0 < tmp137 tmp144 = tmp142 & tmp143 tmp145 = tl.load(in_ptr24 + (r2 + (128*x1)), tmp144 & xmask, eviction_policy='evict_last', other=0.0) tmp146 = tl.where(tmp144, tmp145, tmp125) tmp147 = tl.where(tmp140, tmp141, tmp146) tmp148 = tl.where(tmp135, tmp136, tmp147) tmp149 = tl.where(tmp130, tmp131, tmp148) tmp150 = tl.full([1, 1], 28, tl.int64) tmp151 = tmp0 >= tmp150 tmp152 = tl.full([1, 1], 29, tl.int64) tmp153 = tmp0 < tmp152 tmp154 = tmp151 & tmp153 tmp155 = tl.load(in_ptr25 + (r2 + (128*x1)), tmp154 & xmask, eviction_policy='evict_last', other=0.0) tmp156 = tl.full([1, 1], 27, tl.int64) tmp157 = tmp0 >= tmp156 tmp158 = tmp0 < tmp150 tmp159 = tmp157 & tmp158 tmp160 = tl.load(in_ptr26 + (r2 + (128*x1)), tmp159 & xmask, eviction_policy='evict_last', other=0.0) tmp161 = tl.full([1, 1], 26, tl.int64) tmp162 = tmp0 >= tmp161 tmp163 = tmp0 < tmp156 tmp164 = tmp162 & tmp163 tmp165 = tl.load(in_ptr27 + (r2 + (128*x1)), tmp164 & xmask, eviction_policy='evict_last', other=0.0) tmp166 = tmp0 >= tmp128 tmp167 = tmp0 < tmp161 tmp168 = tmp166 & tmp167 tmp169 = tl.load(in_ptr28 + (r2 + (128*x1)), tmp168 & xmask, eviction_policy='evict_last', other=0.0) tmp170 = tl.where(tmp168, tmp169, tmp149) tmp171 = tl.where(tmp164, tmp165, tmp170) tmp172 = tl.where(tmp159, tmp160, tmp171) tmp173 = tl.where(tmp154, tmp155, tmp172) tmp174 = tl.full([1, 1], 32, tl.int64) tmp175 = tmp0 >= tmp174 tmp176 = tl.full([1, 1], 33, tl.int64) tmp177 = tmp0 < tmp176 tmp178 = tmp175 & tmp177 tmp179 = tl.load(in_ptr29 + (r2 + (128*x1)), tmp178 & xmask, eviction_policy='evict_last', other=0.0) tmp180 = tl.full([1, 1], 31, tl.int64) tmp181 = tmp0 >= tmp180 tmp182 = tmp0 < tmp174 tmp183 = tmp181 & tmp182 tmp184 = tl.load(in_ptr30 + (r2 + (128*x1)), tmp183 & xmask, eviction_policy='evict_last', other=0.0) tmp185 = tl.full([1, 1], 30, tl.int64) tmp186 = tmp0 >= tmp185 tmp187 = tmp0 < tmp180 tmp188 = tmp186 & tmp187 tmp189 = tl.load(in_ptr31 + (r2 + (128*x1)), tmp188 & xmask, eviction_policy='evict_last', other=0.0) tmp190 = tmp0 >= tmp152 tmp191 = tmp0 < tmp185 tmp192 = tmp190 & tmp191 tmp193 = tl.load(in_ptr32 + (r2 + (128*x1)), tmp192 & xmask, eviction_policy='evict_last', other=0.0) tmp194 = tl.where(tmp192, tmp193, tmp173) tmp195 = tl.where(tmp188, tmp189, tmp194) tmp196 = tl.where(tmp183, tmp184, tmp195) tmp197 = tl.where(tmp178, tmp179, tmp196) tmp198 = tl.full([1, 1], 36, tl.int64) tmp199 = tmp0 >= tmp198 tmp200 = tl.full([1, 1], 37, tl.int64) tmp201 = tmp0 < tmp200 tmp202 = tmp199 & tmp201 tmp203 = tl.load(in_ptr33 + (r2 + (128*x1)), tmp202 & xmask, eviction_policy='evict_last', other=0.0) tmp204 = tl.full([1, 1], 35, tl.int64) tmp205 = tmp0 >= tmp204 tmp206 = tmp0 < tmp198 tmp207 = tmp205 & tmp206 tmp208 = tl.load(in_ptr34 + (r2 + (128*x1)), tmp207 & xmask, eviction_policy='evict_last', other=0.0) tmp209 = tl.full([1, 1], 34, tl.int64) tmp210 = tmp0 >= tmp209 tmp211 = tmp0 < tmp204 tmp212 = tmp210 & tmp211 tmp213 = tl.load(in_ptr35 + (r2 + (128*x1)), tmp212 & xmask, eviction_policy='evict_last', other=0.0) tmp214 = tmp0 >= tmp176 tmp215 = tmp0 < tmp209 tmp216 = tmp214 & tmp215 tmp217 = 
tl.load(in_ptr36 + (r2 + (128*x1)), tmp216 & xmask, eviction_policy='evict_last', other=0.0) tmp218 = tl.where(tmp216, tmp217, tmp197) tmp219 = tl.where(tmp212, tmp213, tmp218) tmp220 = tl.where(tmp207, tmp208, tmp219) tmp221 = tl.where(tmp202, tmp203, tmp220) tmp222 = tl.full([1, 1], 40, tl.int64) tmp223 = tmp0 >= tmp222 tmp224 = tl.full([1, 1], 41, tl.int64) tmp225 = tmp0 < tmp224 tmp226 = tmp223 & tmp225 tmp227 = tl.load(in_ptr37 + (r2 + (128*x1)), tmp226 & xmask, eviction_policy='evict_last', other=0.0) tmp228 = tl.full([1, 1], 39, tl.int64) tmp229 = tmp0 >= tmp228 tmp230 = tmp0 < tmp222 tmp231 = tmp229 & tmp230 tmp232 = tl.load(in_ptr38 + (r2 + (128*x1)), tmp231 & xmask, eviction_policy='evict_last', other=0.0) tmp233 = tl.full([1, 1], 38, tl.int64) tmp234 = tmp0 >= tmp233 tmp235 = tmp0 < tmp228 tmp236 = tmp234 & tmp235 tmp237 = tl.load(in_ptr39 + (r2 + (128*x1)), tmp236 & xmask, eviction_policy='evict_last', other=0.0) tmp238 = tmp0 >= tmp200 tmp239 = tmp0 < tmp233 tmp240 = tmp238 & tmp239 tmp241 = tl.load(in_ptr40 + (r2 + (128*x1)), tmp240 & xmask, eviction_policy='evict_last', other=0.0) tmp242 = tl.where(tmp240, tmp241, tmp221) tmp243 = tl.where(tmp236, tmp237, tmp242) tmp244 = tl.where(tmp231, tmp232, tmp243) tmp245 = tl.where(tmp226, tmp227, tmp244) tmp246 = tl.full([1, 1], 44, tl.int64) tmp247 = tmp0 >= tmp246 tmp248 = tl.full([1, 1], 45, tl.int64) tmp249 = tmp0 < tmp248 tmp250 = tmp247 & tmp249 tmp251 = tl.load(in_ptr41 + (r2 + (128*x1)), tmp250 & xmask, eviction_policy='evict_last', other=0.0) tmp252 = tl.full([1, 1], 43, tl.int64) tmp253 = tmp0 >= tmp252 tmp254 = tmp0 < tmp246 tmp255 = tmp253 & tmp254 tmp256 = tl.load(in_ptr42 + (r2 + (128*x1)), tmp255 & xmask, eviction_policy='evict_last', other=0.0) tmp257 = tl.full([1, 1], 42, tl.int64) tmp258 = tmp0 >= tmp257 tmp259 = tmp0 < tmp252 tmp260 = tmp258 & tmp259 tmp261 = tl.load(in_ptr43 + (r2 + (128*x1)), tmp260 & xmask, eviction_policy='evict_last', other=0.0) tmp262 = tmp0 >= tmp224 tmp263 = tmp0 < tmp257 tmp264 = tmp262 & tmp263 tmp265 = tl.load(in_ptr44 + (r2 + (128*x1)), tmp264 & xmask, eviction_policy='evict_last', other=0.0) tmp266 = tl.where(tmp264, tmp265, tmp245) tmp267 = tl.where(tmp260, tmp261, tmp266) tmp268 = tl.where(tmp255, tmp256, tmp267) tmp269 = tl.where(tmp250, tmp251, tmp268) tmp270 = tl.full([1, 1], 48, tl.int64) tmp271 = tmp0 >= tmp270 tmp272 = tl.full([1, 1], 49, tl.int64) tmp273 = tmp0 < tmp272 tmp274 = tmp271 & tmp273 tmp275 = tl.load(in_ptr45 + (r2 + (128*x1)), tmp274 & xmask, eviction_policy='evict_last', other=0.0) tmp276 = tl.full([1, 1], 47, tl.int64) tmp277 = tmp0 >= tmp276 tmp278 = tmp0 < tmp270 tmp279 = tmp277 & tmp278 tmp280 = tl.load(in_ptr46 + (r2 + (128*x1)), tmp279 & xmask, eviction_policy='evict_last', other=0.0) tmp281 = tl.full([1, 1], 46, tl.int64) tmp282 = tmp0 >= tmp281 tmp283 = tmp0 < tmp276 tmp284 = tmp282 & tmp283 tmp285 = tl.load(in_ptr47 + (r2 + (128*x1)), tmp284 & xmask, eviction_policy='evict_last', other=0.0) tmp286 = tmp0 >= tmp248 tmp287 = tmp0 < tmp281 tmp288 = tmp286 & tmp287 tmp289 = tl.load(in_ptr48 + (r2 + (128*x1)), tmp288 & xmask, eviction_policy='evict_last', other=0.0) tmp290 = tl.where(tmp288, tmp289, tmp269) tmp291 = tl.where(tmp284, tmp285, tmp290) tmp292 = tl.where(tmp279, tmp280, tmp291) tmp293 = tl.where(tmp274, tmp275, tmp292) tmp294 = tl.full([1, 1], 52, tl.int64) tmp295 = tmp0 >= tmp294 tmp296 = tl.full([1, 1], 53, tl.int64) tmp297 = tmp0 < tmp296 tmp298 = tmp295 & tmp297 tmp299 = tl.load(in_ptr49 + (r2 + (128*x1)), tmp298 & xmask, 
eviction_policy='evict_last', other=0.0) tmp300 = tl.full([1, 1], 51, tl.int64) tmp301 = tmp0 >= tmp300 tmp302 = tmp0 < tmp294 tmp303 = tmp301 & tmp302 tmp304 = tl.load(in_ptr50 + (r2 + (128*x1)), tmp303 & xmask, eviction_policy='evict_last', other=0.0) tmp305 = tl.full([1, 1], 50, tl.int64) tmp306 = tmp0 >= tmp305 tmp307 = tmp0 < tmp300 tmp308 = tmp306 & tmp307 tmp309 = tl.load(in_ptr51 + (r2 + (128*x1)), tmp308 & xmask, eviction_policy='evict_last', other=0.0) tmp310 = tmp0 >= tmp272 tmp311 = tmp0 < tmp305 tmp312 = tmp310 & tmp311 tmp313 = tl.load(in_ptr52 + (r2 + (128*x1)), tmp312 & xmask, eviction_policy='evict_last', other=0.0) tmp314 = tl.where(tmp312, tmp313, tmp293) tmp315 = tl.where(tmp308, tmp309, tmp314) tmp316 = tl.where(tmp303, tmp304, tmp315) tmp317 = tl.where(tmp298, tmp299, tmp316) tmp318 = tl.full([1, 1], 56, tl.int64) tmp319 = tmp0 >= tmp318 tmp320 = tl.full([1, 1], 57, tl.int64) tmp321 = tmp0 < tmp320 tmp322 = tmp319 & tmp321 tmp323 = tl.load(in_ptr53 + (r2 + (128*x1)), tmp322 & xmask, eviction_policy='evict_last', other=0.0) tmp324 = tl.full([1, 1], 55, tl.int64) tmp325 = tmp0 >= tmp324 tmp326 = tmp0 < tmp318 tmp327 = tmp325 & tmp326 tmp328 = tl.load(in_ptr54 + (r2 + (128*x1)), tmp327 & xmask, eviction_policy='evict_last', other=0.0) tmp329 = tl.full([1, 1], 54, tl.int64) tmp330 = tmp0 >= tmp329 tmp331 = tmp0 < tmp324 tmp332 = tmp330 & tmp331 tmp333 = tl.load(in_ptr55 + (r2 + (128*x1)), tmp332 & xmask, eviction_policy='evict_last', other=0.0) tmp334 = tmp0 >= tmp296 tmp335 = tmp0 < tmp329 tmp336 = tmp334 & tmp335 tmp337 = tl.load(in_ptr56 + (r2 + (128*x1)), tmp336 & xmask, eviction_policy='evict_last', other=0.0) tmp338 = tl.where(tmp336, tmp337, tmp317) tmp339 = tl.where(tmp332, tmp333, tmp338) tmp340 = tl.where(tmp327, tmp328, tmp339) tmp341 = tl.where(tmp322, tmp323, tmp340) tmp342 = tl.full([1, 1], 60, tl.int64) tmp343 = tmp0 >= tmp342 tmp344 = tl.full([1, 1], 61, tl.int64) tmp345 = tmp0 < tmp344 tmp346 = tmp343 & tmp345 tmp347 = tl.load(in_ptr57 + (r2 + (128*x1)), tmp346 & xmask, eviction_policy='evict_last', other=0.0) tmp348 = tl.full([1, 1], 59, tl.int64) tmp349 = tmp0 >= tmp348 tmp350 = tmp0 < tmp342 tmp351 = tmp349 & tmp350 tmp352 = tl.load(in_ptr58 + (r2 + (128*x1)), tmp351 & xmask, eviction_policy='evict_last', other=0.0) tmp353 = tl.full([1, 1], 58, tl.int64) tmp354 = tmp0 >= tmp353 tmp355 = tmp0 < tmp348 tmp356 = tmp354 & tmp355 tmp357 = tl.load(in_ptr59 + (r2 + (128*x1)), tmp356 & xmask, eviction_policy='evict_last', other=0.0) tmp358 = tmp0 >= tmp320 tmp359 = tmp0 < tmp353 tmp360 = tmp358 & tmp359 tmp361 = tl.load(in_ptr60 + (r2 + (128*x1)), tmp360 & xmask, eviction_policy='evict_last', other=0.0) tmp362 = tl.where(tmp360, tmp361, tmp341) tmp363 = tl.where(tmp356, tmp357, tmp362) tmp364 = tl.where(tmp351, tmp352, tmp363) tmp365 = tl.where(tmp346, tmp347, tmp364) tmp366 = tl.full([1, 1], 63, tl.int64) tmp367 = tmp0 >= tmp366 tmp368 = tl.load(in_ptr61 + (r2 + (128*x1)), tmp367 & xmask, eviction_policy='evict_last', other=0.0) tmp369 = tl.full([1, 1], 62, tl.int64) tmp370 = tmp0 >= tmp369 tmp371 = tmp0 < tmp366 tmp372 = tmp370 & tmp371 tmp373 = tl.load(in_ptr62 + (r2 + (128*x1)), tmp372 & xmask, eviction_policy='evict_last', other=0.0) tmp374 = tmp0 >= tmp344 tmp375 = tmp0 < tmp369 tmp376 = tmp374 & tmp375 tmp377 = tl.load(in_ptr63 + (r2 + (128*x1)), tmp376 & xmask, eviction_policy='evict_last', other=0.0) tmp378 = tl.where(tmp376, tmp377, tmp365) tmp379 = tl.where(tmp372, tmp373, tmp378) tmp380 = tl.where(tmp367, tmp368, tmp379) tmp381 = tmp380 * tmp380 
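# tmp380 is the fully assembled vlad[n, cluster, :] entry, selected from the 64
# per-cluster residual sums by the branch chain above (replaying the generated
# slice_scatter writes). The lines below square it, reduce over the 128-wide
# descriptor axis r2, and take the square root: the per-cluster L2 norm that is
# written to in_out_ptr1.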
tmp382 = tl.broadcast_to(tmp381, [XBLOCK, RBLOCK]) tmp384 = tl.where(xmask, tmp382, 0) tmp385 = tl.sum(tmp384, 1)[:, None] tmp386 = libdevice.sqrt(tmp385) tl.store(in_out_ptr0 + (r2 + (128*x3)), tmp380, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + (x3), tmp386, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/t2/ct2f2hshheqz3ic3c445uhobgzuy7jfkonekptjcv4yqglojftmh.py # Topologically Sorted Source Nodes: [vlad_3], Original ATen: [aten.linalg_vector_norm, aten.div] # Source node to ATen node mapping: # vlad_3 => div_3, pow_5, pow_6, sum_68 # Graph fragment: # %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_2, 2), kwargs = {}) # %sum_68 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, [1], True), kwargs = {}) # %pow_6 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_68, 0.5), kwargs = {}) # %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, %expand_66), kwargs = {}) triton_red_fused_div_linalg_vector_norm_7 = async_compile.triton('triton_red_fused_div_linalg_vector_norm_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.reduction( size_hints=[4, 8192], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_div_linalg_vector_norm_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_red_fused_div_linalg_vector_norm_7(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 4 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex _tmp7 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + (8192*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + ((64*x0) + (r1 // 128)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = 1e-12 tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 / tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = _tmp7 + tmp6 _tmp7 = tl.where(rmask & xmask, tmp8, _tmp7) 
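# End of the first reduction pass: _tmp7 holds the running sum of squares of the
# intra-normalized entries (vlad / max(per-cluster norm, 1e-12)). The statements
# below reduce this to the global L2 norm and store it; a second pass over r then
# divides by max(global norm, 1e-12) to produce the final flattened descriptor.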
tmp7 = tl.sum(_tmp7, 1)[:, None] tmp9 = libdevice.sqrt(tmp7) tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = tl.load(in_ptr0 + (r1 + (8192*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tl.load(in_ptr1 + ((64*x0) + (r1 // 128)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp12 = 1e-12 tmp13 = triton_helpers.maximum(tmp11, tmp12) tmp14 = tmp10 / tmp13 tmp15 = triton_helpers.maximum(tmp9, tmp12) tmp16 = tmp14 / tmp15 tl.store(out_ptr0 + (r1 + (8192*x0)), tmp16, rmask & xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 128, 64, 64), (524288, 4096, 64, 1)) assert_size_stride(primals_2, (64, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_3, (64, 128), (128, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.linalg_vector_norm] stream0 = get_raw_stream(0) triton_red_fused_linalg_vector_norm_0.run(primals_1, buf0, 16384, 128, grid=grid(16384), stream=stream0) buf1 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) buf6 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf8 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf10 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf12 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf15 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf17 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf19 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf21 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf24 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf26 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf28 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf30 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf33 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf35 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf37 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf39 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf42 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf44 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf46 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf48 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf51 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf53 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf55 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf57 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf60 = 
empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf62 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf64 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf66 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf69 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf71 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf73 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf75 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf78 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf80 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf82 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf84 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf87 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf89 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf91 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf93 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf96 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf98 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf100 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf102 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf105 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf107 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf109 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf111 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf114 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf116 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf118 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf120 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf123 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf125 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf127 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf129 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf132 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf134 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf136 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf138 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf141 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf143 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf145 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) # Topologically Sorted Source Nodes: [x, residual_2, residual_4, 
residual_6, residual_8, residual_10, residual_12, residual_14, residual_16, residual_18, residual_20, residual_22, residual_24, residual_26, residual_28, residual_30, residual_32, residual_34, residual_36, residual_38, residual_40, residual_42, residual_44, residual_46, residual_48, residual_50, residual_52, residual_54, residual_56, residual_58, residual_60, residual_62, residual_64, residual_66, residual_68, residual_70, residual_72, residual_74, residual_76, residual_78, residual_80, residual_82, residual_84, residual_86, residual_88, residual_90, residual_92, residual_94, residual_96, residual_98, residual_100, residual_102, residual_104, residual_106, residual_108, residual_110, residual_112, residual_114, residual_116, residual_118, residual_120, residual_122, residual_124, residual_126], Original ATen: [aten.div, aten.sub] triton_poi_fused_div_sub_1.run(primals_1, buf0, primals_3, buf1, buf6, buf8, buf10, buf12, buf15, buf17, buf19, buf21, buf24, buf26, buf28, buf30, buf33, buf35, buf37, buf39, buf42, buf44, buf46, buf48, buf51, buf53, buf55, buf57, buf60, buf62, buf64, buf66, buf69, buf71, buf73, buf75, buf78, buf80, buf82, buf84, buf87, buf89, buf91, buf93, buf96, buf98, buf100, buf102, buf105, buf107, buf109, buf111, buf114, buf116, buf118, buf120, buf123, buf125, buf127, buf129, buf132, buf134, buf136, buf138, buf141, buf143, buf145, 2097152, grid=grid(2097152), stream=stream0) del primals_1 # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf3 = reinterpret_tensor(buf0, (4, 1, 4096), (4096, 4096, 1), 0); del buf0 # reuse buf4 = empty_strided_cuda((4, 1, 4096), (4096, 4096, 1), torch.float32) # Topologically Sorted Source Nodes: [soft_assign_1], Original ATen: [aten._softmax] triton_per_fused__softmax_2.run(buf2, buf3, buf4, 16384, 64, grid=grid(16384), stream=stream0) buf5 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf7 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf9 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf11 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf13 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf16 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf18 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf20 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf22 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf25 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf27 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf29 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf31 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf34 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf36 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf38 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf40 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf43 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf45 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf47 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf49 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf52 = 
empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf54 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf56 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf58 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf61 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf63 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf65 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf67 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) # Topologically Sorted Source Nodes: [residual, residual_1, sum_1, residual_3, sum_2, residual_5, sum_3, residual_7, sum_4, residual_9, sum_5, residual_11, sum_6, residual_13, sum_7, residual_15, sum_8, residual_17, sum_9, residual_19, sum_10, residual_21, sum_11, residual_23, sum_12, residual_25, sum_13, residual_27, sum_14, residual_29, sum_15, residual_31, sum_16, residual_33, sum_17, residual_35, sum_18, residual_37, sum_19, residual_39, sum_20, residual_41, sum_21, residual_43, sum_22, residual_45, sum_23, residual_47, sum_24, residual_49, sum_25, residual_51, sum_26, residual_53, sum_27, residual_55, sum_28, residual_57, sum_29], Original ATen: [aten.sub, aten.mul, aten.sum] triton_red_fused_mul_sub_sum_3.run(buf1, primals_3, buf2, buf3, buf4, buf6, buf8, buf10, buf12, buf15, buf17, buf19, buf21, buf24, buf26, buf28, buf30, buf33, buf35, buf37, buf39, buf42, buf44, buf46, buf48, buf51, buf53, buf55, buf57, buf60, buf62, buf64, buf66, buf5, buf7, buf9, buf11, buf13, buf16, buf18, buf20, buf22, buf25, buf27, buf29, buf31, buf34, buf36, buf38, buf40, buf43, buf45, buf47, buf49, buf52, buf54, buf56, buf58, buf61, buf63, buf65, buf67, 512, 4096, grid=grid(512), stream=stream0) buf70 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf72 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf74 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf76 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf79 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf81 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf83 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf85 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf88 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf90 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf92 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf94 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf97 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf99 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf101 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf103 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf106 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf108 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf110 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf112 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf115 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf117 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf119 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf121 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf124 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf126 = empty_strided_cuda((4, 1, 128), 
(128, 512, 1), torch.float32) buf128 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf130 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) # Topologically Sorted Source Nodes: [residual_59, sum_30, residual_61, sum_31, residual_63, sum_32, residual_65, sum_33, residual_67, sum_34, residual_69, sum_35, residual_71, sum_36, residual_73, sum_37, residual_75, sum_38, residual_77, sum_39, residual_79, sum_40, residual_81, sum_41, residual_83, sum_42, residual_85, sum_43, residual_87, sum_44, residual_89, sum_45, residual_91, sum_46, residual_93, sum_47, residual_95, sum_48, residual_97, sum_49, residual_99, sum_50, residual_101, sum_51, residual_103, sum_52, residual_105, sum_53, residual_107, sum_54, residual_109, sum_55, residual_111, sum_56, residual_113, sum_57], Original ATen: [aten.mul, aten.sum] triton_red_fused_mul_sum_4.run(buf69, buf2, buf3, buf4, buf71, buf73, buf75, buf78, buf80, buf82, buf84, buf87, buf89, buf91, buf93, buf96, buf98, buf100, buf102, buf105, buf107, buf109, buf111, buf114, buf116, buf118, buf120, buf123, buf125, buf127, buf129, buf70, buf72, buf74, buf76, buf79, buf81, buf83, buf85, buf88, buf90, buf92, buf94, buf97, buf99, buf101, buf103, buf106, buf108, buf110, buf112, buf115, buf117, buf119, buf121, buf124, buf126, buf128, buf130, 512, 4096, grid=grid(512), stream=stream0) buf133 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf135 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf137 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf139 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf142 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf144 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf146 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) # Topologically Sorted Source Nodes: [residual_115, sum_58, residual_117, sum_59, residual_119, sum_60, residual_121, sum_61, residual_123, sum_62, residual_125, sum_63, residual_127, sum_64], Original ATen: [aten.mul, aten.sum] triton_red_fused_mul_sum_5.run(buf132, buf2, buf3, buf4, buf134, buf136, buf138, buf141, buf143, buf145, buf133, buf135, buf137, buf139, buf142, buf144, buf146, 512, 4096, grid=grid(512), stream=stream0) buf14 = empty_strided_cuda((4, 64, 128), (8192, 128, 1), torch.float32) buf23 = buf14; del buf14 # reuse buf32 = buf23; del buf23 # reuse buf41 = buf32; del buf32 # reuse buf50 = buf41; del buf41 # reuse buf59 = buf50; del buf50 # reuse buf68 = buf59; del buf59 # reuse buf77 = buf68; del buf68 # reuse buf86 = buf77; del buf77 # reuse buf95 = buf86; del buf86 # reuse buf104 = buf95; del buf95 # reuse buf113 = buf104; del buf104 # reuse buf122 = buf113; del buf113 # reuse buf131 = buf122; del buf122 # reuse buf140 = buf131; del buf131 # reuse buf147 = buf140; del buf140 # reuse buf148 = empty_strided_cuda((4, 64, 1), (64, 1, 256), torch.float32) buf149 = reinterpret_tensor(buf148, (4, 64, 1), (64, 1, 1), 0); del buf148 # reuse # Topologically Sorted Source Nodes: [vlad, setitem, setitem_1, setitem_2, setitem_3, setitem_4, setitem_5, setitem_6, setitem_7, setitem_8, setitem_9, setitem_10, setitem_11, setitem_12, setitem_13, setitem_14, setitem_15, setitem_16, setitem_17, setitem_18, setitem_19, setitem_20, setitem_21, setitem_22, setitem_23, setitem_24, setitem_25, setitem_26, setitem_27, setitem_28, setitem_29, setitem_30, setitem_31, setitem_32, setitem_33, setitem_34, setitem_35, setitem_36, setitem_37, setitem_38, setitem_39, setitem_40, setitem_41, 
setitem_42, setitem_43, setitem_44, setitem_45, setitem_46, setitem_47, setitem_48, setitem_49, setitem_50, setitem_51, setitem_52, setitem_53, setitem_54, setitem_55, setitem_56, setitem_57, setitem_58, setitem_59, setitem_60, setitem_61, setitem_62, setitem_63, vlad_1], Original ATen: [aten.zeros, aten.copy, aten.linalg_vector_norm] triton_per_fused_copy_linalg_vector_norm_zeros_6.run(buf147, buf149, buf13, buf11, buf9, buf7, buf5, buf22, buf20, buf18, buf16, buf31, buf29, buf27, buf25, buf40, buf38, buf36, buf34, buf49, buf47, buf45, buf43, buf58, buf56, buf54, buf52, buf67, buf65, buf63, buf61, buf76, buf74, buf72, buf70, buf85, buf83, buf81, buf79, buf94, buf92, buf90, buf88, buf103, buf101, buf99, buf97, buf112, buf110, buf108, buf106, buf121, buf119, buf117, buf115, buf130, buf128, buf126, buf124, buf139, buf137, buf135, buf133, buf146, buf144, buf142, 256, 128, grid=grid(256), stream=stream0) del buf101 del buf103 del buf106 del buf108 del buf11 del buf110 del buf112 del buf115 del buf117 del buf119 del buf121 del buf124 del buf126 del buf128 del buf13 del buf130 del buf133 del buf135 del buf137 del buf139 del buf142 del buf144 del buf146 del buf16 del buf18 del buf20 del buf22 del buf25 del buf27 del buf29 del buf31 del buf34 del buf36 del buf38 del buf40 del buf43 del buf45 del buf47 del buf49 del buf5 del buf52 del buf54 del buf56 del buf58 del buf61 del buf63 del buf65 del buf67 del buf7 del buf70 del buf72 del buf74 del buf76 del buf79 del buf81 del buf83 del buf85 del buf88 del buf9 del buf90 del buf92 del buf94 del buf97 del buf99 buf150 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf151 = reinterpret_tensor(buf150, (4, 1), (1, 1), 0); del buf150 # reuse buf152 = empty_strided_cuda((4, 8192), (8192, 1), torch.float32) # Topologically Sorted Source Nodes: [vlad_3], Original ATen: [aten.linalg_vector_norm, aten.div] triton_red_fused_div_linalg_vector_norm_7.run(buf151, buf147, buf149, buf152, 4, 8192, grid=grid(4), stream=stream0) return (buf152, primals_2, buf1, buf2, buf3, buf4, reinterpret_tensor(primals_3, (1, 128), (128, 1), 0), buf6, buf8, buf10, buf12, buf15, buf17, buf19, buf21, buf24, buf26, buf28, buf30, buf33, buf35, buf37, buf39, buf42, buf44, buf46, buf48, buf51, buf53, buf55, buf57, buf60, buf62, buf64, buf66, buf69, buf71, buf73, buf75, buf78, buf80, buf82, buf84, buf87, buf89, buf91, buf93, buf96, buf98, buf100, buf102, buf105, buf107, buf109, buf111, buf114, buf116, buf118, buf120, buf123, buf125, buf127, buf129, buf132, buf134, buf136, buf138, buf141, buf143, buf145, buf147, buf149, buf151, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 128, 64, 64), (524288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((64, 128), (128, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
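# A minimal driver sketch (an addition for orientation, not generated output):
# it feeds `call` the same shapes that `benchmark_compiled_module` uses. The
# first returned tensor is the (4, 8192) L2-normalized VLAD descriptor
# (buf152); the remaining entries are intermediates kept for the backward pass.
def run_compiled_once():
    x = torch.rand(4, 128, 64, 64, device='cuda')        # primals_1
    conv_w = torch.rand(64, 128, 1, 1, device='cuda')    # primals_2
    centroids = torch.rand(64, 128, device='cuda')       # primals_3
    outs = call([x, conv_w, centroids])                  # call() empties the arg list
    return outs[0]                                       # (4, 8192); rows have unit L2 norm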
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from sklearn.neighbors import NearestNeighbors
try:
    import faiss  # optional: required only when init_params runs with vladv2=True and use_faiss=True
except ImportError:
    faiss = None


class NetVLAD(nn.Module):
    """NetVLAD layer implementation"""

    def __init__(self, num_clusters=64, dim=128, normalize_input=True,
                 vladv2=False, use_faiss=True):
        """
        Args:
            num_clusters : int
                The number of clusters
            dim : int
                Dimension of descriptors
            normalize_input : bool
                If true, descriptor-wise L2 normalization is applied to input.
            vladv2 : bool
                If true, use vladv2 otherwise use vladv1
            use_faiss : bool
                If true, use faiss for the vladv2 nearest-neighbour search,
                otherwise fall back to sklearn.
        """
        super().__init__()
        self.num_clusters = num_clusters
        self.dim = dim
        self.alpha = 0
        self.vladv2 = vladv2
        self.normalize_input = normalize_input
        self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1), bias=vladv2)
        self.centroids = nn.Parameter(torch.rand(num_clusters, dim))
        self.use_faiss = use_faiss

    def init_params(self, clsts, traindescs):
        if not self.vladv2:
            clstsAssign = clsts / np.linalg.norm(clsts, axis=1, keepdims=True)
            dots = np.dot(clstsAssign, traindescs.T)
            dots.sort(0)
            dots = dots[::-1, :]
            self.alpha = (-np.log(0.01) / np.mean(dots[0, :] - dots[1, :])).item()
            self.centroids = nn.Parameter(torch.from_numpy(clsts))
            self.conv.weight = nn.Parameter(torch.from_numpy(
                self.alpha * clstsAssign).unsqueeze(2).unsqueeze(3))
            self.conv.bias = None
        else:
            if not self.use_faiss:
                knn = NearestNeighbors(n_jobs=-1)
                knn.fit(traindescs)
                del traindescs
                ds_sq = np.square(knn.kneighbors(clsts, 2)[1])
                del knn
            else:
                index = faiss.IndexFlatL2(traindescs.shape[1])
                index.add(traindescs)
                del traindescs
                ds_sq = np.square(index.search(clsts, 2)[1])
                del index
            self.alpha = (-np.log(0.01) / np.mean(ds_sq[:, 1] - ds_sq[:, 0])).item()
            self.centroids = nn.Parameter(torch.from_numpy(clsts))
            del clsts, ds_sq
            self.conv.weight = nn.Parameter(
                (2.0 * self.alpha * self.centroids).unsqueeze(-1).unsqueeze(-1))
            self.conv.bias = nn.Parameter(-self.alpha * self.centroids.norm(dim=1))

    def forward(self, x):
        N, C = x.shape[:2]
        if self.normalize_input:
            x = F.normalize(x, p=2, dim=1)
        soft_assign = self.conv(x).view(N, self.num_clusters, -1)
        soft_assign = F.softmax(soft_assign, dim=1)
        x_flatten = x.view(N, C, -1)
        vlad = torch.zeros([N, self.num_clusters, C], dtype=x.dtype,
                           layout=x.layout, device=x.device)
        # use a separate loop index so the channel count C is not shadowed
        for k in range(self.num_clusters):
            residual = x_flatten.unsqueeze(0).permute(1, 0, 2, 3) - \
                self.centroids[k:k + 1, :].expand(x_flatten.size(-1), -1, -1
                ).permute(1, 2, 0).unsqueeze(0)
            residual *= soft_assign[:, k:k + 1, :].unsqueeze(2)
            vlad[:, k:k + 1, :] = residual.sum(dim=-1)
        vlad = F.normalize(vlad, p=2, dim=2)
        vlad = vlad.view(x.size(0), -1)
        vlad = F.normalize(vlad, p=2, dim=1)
        return vlad


def get_inputs():
    return [torch.rand([4, 128, 64, 64])]


def get_init_inputs():
    return [[], {}]
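# A minimal usage sketch (an addition; `clsts` and `descs` are illustrative
# stand-ins for k-means cluster centres and training descriptors), assuming the
# default vladv1 path (vladv2=False):
def example_usage():
    net = NetVLAD(num_clusters=64, dim=128, vladv2=False)
    clsts = np.random.randn(64, 128).astype(np.float32)    # cluster centres
    descs = np.random.randn(1000, 128).astype(np.float32)  # training descriptors
    net.init_params(clsts, descs)                          # sets alpha, centroids, conv weight
    out = net(torch.rand(2, 128, 16, 16))
    return out.shape                                       # torch.Size([2, 8192])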
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np import torch.nn as nn from sklearn.neighbors import NearestNeighbors assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_red_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): rnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 4096 x1 = xindex // 4096 _tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x3 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 524288 * x1), rmask, eviction_policy='evict_last', other=0.0) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = _tmp3 + tmp2 _tmp3 = tl.where(rmask, tmp4, _tmp3) tmp3 = tl.sum(_tmp3, 1)[:, None] tl.store(out_ptr0 + x3, tmp3, None) @triton.jit def triton_poi_fused_div_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26, out_ptr27, out_ptr28, out_ptr29, out_ptr30, out_ptr31, out_ptr32, out_ptr33, out_ptr34, out_ptr35, out_ptr36, out_ptr37, out_ptr38, out_ptr39, out_ptr40, out_ptr41, out_ptr42, out_ptr43, out_ptr44, out_ptr45, out_ptr46, out_ptr47, out_ptr48, out_ptr49, out_ptr50, out_ptr51, out_ptr52, out_ptr53, out_ptr54, out_ptr55, out_ptr56, out_ptr57, out_ptr58, out_ptr59, out_ptr60, out_ptr61, out_ptr62, out_ptr63, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 4096 x2 = xindex // 524288 x1 = xindex // 4096 % 128 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (x0 + 4096 * x2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr2 + (128 + x1), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (256 + x1), None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + (384 + x1), None, eviction_policy='evict_last') tmp12 = tl.load(in_ptr2 + (512 + x1), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr2 + (640 + x1), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + (768 + x1), None, eviction_policy='evict_last') tmp18 = tl.load(in_ptr2 + (896 + x1), None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr2 + (1024 + x1), None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + (1152 + x1), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (1280 + x1), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (1408 + x1), None, eviction_policy='evict_last') tmp28 = tl.load(in_ptr2 + (1536 + x1), None, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (1664 + x1), None, eviction_policy='evict_last') tmp32 = tl.load(in_ptr2 + 


@triton.jit
def triton_poi_fused_div_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
    out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7,
    out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13,
    out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19,
    out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25,
    out_ptr26, out_ptr27, out_ptr28, out_ptr29, out_ptr30, out_ptr31,
    out_ptr32, out_ptr33, out_ptr34, out_ptr35, out_ptr36, out_ptr37,
    out_ptr38, out_ptr39, out_ptr40, out_ptr41, out_ptr42, out_ptr43,
    out_ptr44, out_ptr45, out_ptr46, out_ptr47, out_ptr48, out_ptr49,
    out_ptr50, out_ptr51, out_ptr52, out_ptr53, out_ptr54, out_ptr55,
    out_ptr56, out_ptr57, out_ptr58, out_ptr59, out_ptr60, out_ptr61,
    out_ptr62, out_ptr63, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x0 = xindex % 4096
    x2 = xindex // 524288
    x1 = xindex // 4096 % 128
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + (x0 + 4096 * x2), None, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr2 + (128 + x1), None, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr2 + (256 + x1), None, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr2 + (384 + x1), None, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr2 + (512 + x1), None, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr2 + (640 + x1), None, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr2 + (768 + x1), None, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr2 + (896 + x1), None, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr2 + (1024 + x1), None, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr2 + (1152 + x1), None, eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr2 + (1280 + x1), None, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr2 + (1408 + x1), None, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr2 + (1536 + x1), None, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr2 + (1664 + x1), None, eviction_policy='evict_last')
    tmp32 = tl.load(in_ptr2 + (1792 + x1), None, eviction_policy='evict_last')
    tmp34 = tl.load(in_ptr2 + (1920 + x1), None, eviction_policy='evict_last')
    tmp36 = tl.load(in_ptr2 + (2048 + x1), None, eviction_policy='evict_last')
    tmp38 = tl.load(in_ptr2 + (2176 + x1), None, eviction_policy='evict_last')
    tmp40 = tl.load(in_ptr2 + (2304 + x1), None, eviction_policy='evict_last')
    tmp42 = tl.load(in_ptr2 + (2432 + x1), None, eviction_policy='evict_last')
    tmp44 = tl.load(in_ptr2 + (2560 + x1), None, eviction_policy='evict_last')
    tmp46 = tl.load(in_ptr2 + (2688 + x1), None, eviction_policy='evict_last')
    tmp48 = tl.load(in_ptr2 + (2816 + x1), None, eviction_policy='evict_last')
    tmp50 = tl.load(in_ptr2 + (2944 + x1), None, eviction_policy='evict_last')
    tmp52 = tl.load(in_ptr2 + (3072 + x1), None, eviction_policy='evict_last')
    tmp54 = tl.load(in_ptr2 + (3200 + x1), None, eviction_policy='evict_last')
    tmp56 = tl.load(in_ptr2 + (3328 + x1), None, eviction_policy='evict_last')
    tmp58 = tl.load(in_ptr2 + (3456 + x1), None, eviction_policy='evict_last')
    tmp60 = tl.load(in_ptr2 + (3584 + x1), None, eviction_policy='evict_last')
    tmp62 = tl.load(in_ptr2 + (3712 + x1), None, eviction_policy='evict_last')
    tmp64 = tl.load(in_ptr2 + (3840 + x1), None, eviction_policy='evict_last')
    tmp66 = tl.load(in_ptr2 + (3968 + x1), None, eviction_policy='evict_last')
    tmp68 = tl.load(in_ptr2 + (4096 + x1), None, eviction_policy='evict_last')
    tmp70 = tl.load(in_ptr2 + (4224 + x1), None, eviction_policy='evict_last')
    tmp72 = tl.load(in_ptr2 + (4352 + x1), None, eviction_policy='evict_last')
    tmp74 = tl.load(in_ptr2 + (4480 + x1), None, eviction_policy='evict_last')
    tmp76 = tl.load(in_ptr2 + (4608 + x1), None, eviction_policy='evict_last')
    tmp78 = tl.load(in_ptr2 + (4736 + x1), None, eviction_policy='evict_last')
    tmp80 = tl.load(in_ptr2 + (4864 + x1), None, eviction_policy='evict_last')
    tmp82 = tl.load(in_ptr2 + (4992 + x1), None, eviction_policy='evict_last')
    tmp84 = tl.load(in_ptr2 + (5120 + x1), None, eviction_policy='evict_last')
    tmp86 = tl.load(in_ptr2 + (5248 + x1), None, eviction_policy='evict_last')
    tmp88 = tl.load(in_ptr2 + (5376 + x1), None, eviction_policy='evict_last')
    tmp90 = tl.load(in_ptr2 + (5504 + x1), None, eviction_policy='evict_last')
    tmp92 = tl.load(in_ptr2 + (5632 + x1), None, eviction_policy='evict_last')
    tmp94 = tl.load(in_ptr2 + (5760 + x1), None, eviction_policy='evict_last')
    tmp96 = tl.load(in_ptr2 + (5888 + x1), None, eviction_policy='evict_last')
    tmp98 = tl.load(in_ptr2 + (6016 + x1), None, eviction_policy='evict_last')
    tmp100 = tl.load(in_ptr2 + (6144 + x1), None, eviction_policy='evict_last')
    tmp102 = tl.load(in_ptr2 + (6272 + x1), None, eviction_policy='evict_last')
    tmp104 = tl.load(in_ptr2 + (6400 + x1), None, eviction_policy='evict_last')
    tmp106 = tl.load(in_ptr2 + (6528 + x1), None, eviction_policy='evict_last')
    tmp108 = tl.load(in_ptr2 + (6656 + x1), None, eviction_policy='evict_last')
    tmp110 = tl.load(in_ptr2 + (6784 + x1), None, eviction_policy='evict_last')
    tmp112 = tl.load(in_ptr2 + (6912 + x1), None, eviction_policy='evict_last')
    tmp114 = tl.load(in_ptr2 + (7040 + x1), None, eviction_policy='evict_last')
    tmp116 = tl.load(in_ptr2 + (7168 + x1), None, eviction_policy='evict_last')
    tmp118 = tl.load(in_ptr2 + (7296 + x1), None, eviction_policy='evict_last')
    tmp120 = tl.load(in_ptr2 + (7424 + x1), None, eviction_policy='evict_last')
    tmp122 = tl.load(in_ptr2 + (7552 + x1), None, eviction_policy='evict_last')
    tmp124 = tl.load(in_ptr2 + (7680 + x1), None, eviction_policy='evict_last')
    tmp126 = tl.load(in_ptr2 + (7808 + x1), None, eviction_policy='evict_last')
    tmp128 = tl.load(in_ptr2 + (7936 + x1), None, eviction_policy='evict_last')
    tmp130 = tl.load(in_ptr2 + (8064 + x1), None, eviction_policy='evict_last')
    tmp2 = libdevice.sqrt(tmp1)
    tmp3 = 1e-12
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = tmp0 / tmp4
    tmp7 = tmp5 - tmp6
    tmp9 = tmp5 - tmp8
    tmp11 = tmp5 - tmp10
    tmp13 = tmp5 - tmp12
    tmp15 = tmp5 - tmp14
    tmp17 = tmp5 - tmp16
    tmp19 = tmp5 - tmp18
    tmp21 = tmp5 - tmp20
    tmp23 = tmp5 - tmp22
    tmp25 = tmp5 - tmp24
    tmp27 = tmp5 - tmp26
    tmp29 = tmp5 - tmp28
    tmp31 = tmp5 - tmp30
    tmp33 = tmp5 - tmp32
    tmp35 = tmp5 - tmp34
    tmp37 = tmp5 - tmp36
    tmp39 = tmp5 - tmp38
    tmp41 = tmp5 - tmp40
    tmp43 = tmp5 - tmp42
    tmp45 = tmp5 - tmp44
    tmp47 = tmp5 - tmp46
    tmp49 = tmp5 - tmp48
    tmp51 = tmp5 - tmp50
    tmp53 = tmp5 - tmp52
    tmp55 = tmp5 - tmp54
    tmp57 = tmp5 - tmp56
    tmp59 = tmp5 - tmp58
    tmp61 = tmp5 - tmp60
    tmp63 = tmp5 - tmp62
    tmp65 = tmp5 - tmp64
    tmp67 = tmp5 - tmp66
    tmp69 = tmp5 - tmp68
    tmp71 = tmp5 - tmp70
    tmp73 = tmp5 - tmp72
    tmp75 = tmp5 - tmp74
    tmp77 = tmp5 - tmp76
    tmp79 = tmp5 - tmp78
    tmp81 = tmp5 - tmp80
    tmp83 = tmp5 - tmp82
    tmp85 = tmp5 - tmp84
    tmp87 = tmp5 - tmp86
    tmp89 = tmp5 - tmp88
    tmp91 = tmp5 - tmp90
    tmp93 = tmp5 - tmp92
    tmp95 = tmp5 - tmp94
    tmp97 = tmp5 - tmp96
    tmp99 = tmp5 - tmp98
    tmp101 = tmp5 - tmp100
    tmp103 = tmp5 - tmp102
    tmp105 = tmp5 - tmp104
    tmp107 = tmp5 - tmp106
    tmp109 = tmp5 - tmp108
    tmp111 = tmp5 - tmp110
    tmp113 = tmp5 - tmp112
    tmp115 = tmp5 - tmp114
    tmp117 = tmp5 - tmp116
    tmp119 = tmp5 - tmp118
    tmp121 = tmp5 - tmp120
    tmp123 = tmp5 - tmp122
    tmp125 = tmp5 - tmp124
    tmp127 = tmp5 - tmp126
    tmp129 = tmp5 - tmp128
    tmp131 = tmp5 - tmp130
    tl.store(out_ptr0 + x3, tmp5, None)
    tl.store(out_ptr1 + x3, tmp7, None)
    tl.store(out_ptr2 + x3, tmp9, None)
    tl.store(out_ptr3 + x3, tmp11, None)
    tl.store(out_ptr4 + x3, tmp13, None)
    tl.store(out_ptr5 + x3, tmp15, None)
    tl.store(out_ptr6 + x3, tmp17, None)
    tl.store(out_ptr7 + x3, tmp19, None)
    tl.store(out_ptr8 + x3, tmp21, None)
    tl.store(out_ptr9 + x3, tmp23, None)
    tl.store(out_ptr10 + x3, tmp25, None)
    tl.store(out_ptr11 + x3, tmp27, None)
    tl.store(out_ptr12 + x3, tmp29, None)
    tl.store(out_ptr13 + x3, tmp31, None)
    tl.store(out_ptr14 + x3, tmp33, None)
    tl.store(out_ptr15 + x3, tmp35, None)
    tl.store(out_ptr16 + x3, tmp37, None)
    tl.store(out_ptr17 + x3, tmp39, None)
    tl.store(out_ptr18 + x3, tmp41, None)
    tl.store(out_ptr19 + x3, tmp43, None)
    tl.store(out_ptr20 + x3, tmp45, None)
    tl.store(out_ptr21 + x3, tmp47, None)
    tl.store(out_ptr22 + x3, tmp49, None)
    tl.store(out_ptr23 + x3, tmp51, None)
    tl.store(out_ptr24 + x3, tmp53, None)
    tl.store(out_ptr25 + x3, tmp55, None)
    tl.store(out_ptr26 + x3, tmp57, None)
    tl.store(out_ptr27 + x3, tmp59, None)
    tl.store(out_ptr28 + x3, tmp61, None)
    tl.store(out_ptr29 + x3, tmp63, None)
    tl.store(out_ptr30 + x3, tmp65, None)
    tl.store(out_ptr31 + x3, tmp67, None)
    tl.store(out_ptr32 + x3, tmp69, None)
    tl.store(out_ptr33 + x3, tmp71, None)
    tl.store(out_ptr34 + x3, tmp73, None)
    tl.store(out_ptr35 + x3, tmp75, None)
    tl.store(out_ptr36 + x3, tmp77, None)
    tl.store(out_ptr37 + x3, tmp79, None)
    tl.store(out_ptr38 + x3, tmp81, None)
    tl.store(out_ptr39 + x3, tmp83, None)
    tl.store(out_ptr40 + x3, tmp85, None)
    tl.store(out_ptr41 + x3, tmp87, None)
    tl.store(out_ptr42 + x3, tmp89, None)
    tl.store(out_ptr43 + x3, tmp91, None)
    tl.store(out_ptr44 + x3, tmp93, None)
    tl.store(out_ptr45 + x3, tmp95, None)
    tl.store(out_ptr46 + x3, tmp97, None)
    tl.store(out_ptr47 + x3, tmp99, None)
    tl.store(out_ptr48 + x3, tmp101, None)
    tl.store(out_ptr49 + x3, tmp103, None)
    tl.store(out_ptr50 + x3, tmp105, None)
    tl.store(out_ptr51 + x3, tmp107, None)
    tl.store(out_ptr52 + x3, tmp109, None)
    tl.store(out_ptr53 + x3, tmp111, None)
    tl.store(out_ptr54 + x3, tmp113, None)
    tl.store(out_ptr55 + x3, tmp115, None)
    tl.store(out_ptr56 + x3, tmp117, None)
    tl.store(out_ptr57 + x3, tmp119, None)
    tl.store(out_ptr58 + x3, tmp121, None)
    tl.store(out_ptr59 + x3, tmp123, None)
    tl.store(out_ptr60 + x3, tmp125, None)
    tl.store(out_ptr61 + x3, tmp127, None)
    tl.store(out_ptr62 + x3, tmp129, None)
    tl.store(out_ptr63 + x3, tmp131, None)


@triton.jit
def triton_per_fused__softmax_2(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel,
    XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    x0 = xindex % 4096
    x1 = xindex // 4096
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 262144 * x1), None)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = triton_helpers.max2(tmp1, 1)[:, None]
    tmp4 = tmp0 - tmp3
    tmp5 = tl_math.exp(tmp4)
    tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
    tmp8 = tl.sum(tmp6, 1)[:, None]
    tl.store(out_ptr0 + x3, tmp3, None)
    tl.store(out_ptr1 + x3, tmp8, None)
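

# NOTE (hand-added annotation): the two kernels above fuse (a) channel-wise
# L2-normalization plus per-cluster residuals and (b) the max/exp-sum
# statistics of a numerically stable softmax over 64 clusters. A rough eager
# sketch with assumed shapes (B=4, D=128, K=64, N=4096 positions); cluster 0's
# residual is folded into the first reduction kernel below rather than here.
def _sketch_normalize_and_softmax_stats(x, centroids, logits):
    # x: (B, D, N); centroids: (K, D); logits: (B, K, N)
    x = torch.nn.functional.normalize(x, p=2, dim=1, eps=1e-12)
    residuals = x.unsqueeze(1) - centroids[None, :, :, None]  # (B, K, D, N)
    m = logits.max(dim=1, keepdim=True).values                # per-position max
    s = (logits - m).exp().sum(dim=1, keepdim=True)           # exp-sum
    return residuals, m, s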


@triton.jit
def triton_red_fused_mul_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10,
    in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17,
    in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24,
    in_ptr25, in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31,
    in_ptr32, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5,
    out_ptr6, out_ptr7, out_ptr8, out_ptr9, out_ptr10, out_ptr11, out_ptr12,
    out_ptr13, out_ptr14, out_ptr15, out_ptr16, out_ptr17, out_ptr18,
    out_ptr19, out_ptr20, out_ptr21, out_ptr22, out_ptr23, out_ptr24,
    out_ptr25, out_ptr26, out_ptr27, out_ptr28, xnumel, rnumel,
    XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    xnumel = 512
    rnumel = 4096
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x3 = xindex
    x0 = xindex % 128
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    x1 = xindex // 128
    _tmp11 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp20 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp29 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp38 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp47 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp56 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp65 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp74 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp83 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp92 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp101 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp110 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp119 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp128 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp137 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp146 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp155 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp164 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp173 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp182 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp191 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp200 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp209 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp218 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp227 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp236 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp245 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp254 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp263 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex
        tmp0 = tl.load(in_ptr0 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp3 = tl.load(in_ptr2 + (r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp4 = tl.load(in_ptr3 + (r2 + 4096 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp7 = tl.load(in_ptr4 + (r2 + 4096 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp13 = tl.load(in_ptr5 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp14 = tl.load(in_ptr2 + (4096 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp22 = tl.load(in_ptr6 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp23 = tl.load(in_ptr2 + (8192 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp31 = tl.load(in_ptr7 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp32 = tl.load(in_ptr2 + (12288 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp40 = tl.load(in_ptr8 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp41 = tl.load(in_ptr2 + (16384 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp49 = tl.load(in_ptr9 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp50 = tl.load(in_ptr2 + (20480 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp58 = tl.load(in_ptr10 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp59 = tl.load(in_ptr2 + (24576 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp67 = tl.load(in_ptr11 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp68 = tl.load(in_ptr2 + (28672 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp76 = tl.load(in_ptr12 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp77 = tl.load(in_ptr2 + (32768 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp85 = tl.load(in_ptr13 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp86 = tl.load(in_ptr2 + (36864 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp94 = tl.load(in_ptr14 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp95 = tl.load(in_ptr2 + (40960 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp103 = tl.load(in_ptr15 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp104 = tl.load(in_ptr2 + (45056 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp112 = tl.load(in_ptr16 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp113 = tl.load(in_ptr2 + (49152 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp121 = tl.load(in_ptr17 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp122 = tl.load(in_ptr2 + (53248 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp130 = tl.load(in_ptr18 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp131 = tl.load(in_ptr2 + (57344 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp139 = tl.load(in_ptr19 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp140 = tl.load(in_ptr2 + (61440 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp148 = tl.load(in_ptr20 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp149 = tl.load(in_ptr2 + (65536 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp157 = tl.load(in_ptr21 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp158 = tl.load(in_ptr2 + (69632 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp166 = tl.load(in_ptr22 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp167 = tl.load(in_ptr2 + (73728 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp175 = tl.load(in_ptr23 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp176 = tl.load(in_ptr2 + (77824 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp184 = tl.load(in_ptr24 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp185 = tl.load(in_ptr2 + (81920 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp193 = tl.load(in_ptr25 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp194 = tl.load(in_ptr2 + (86016 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp202 = tl.load(in_ptr26 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp203 = tl.load(in_ptr2 + (90112 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp211 = tl.load(in_ptr27 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp212 = tl.load(in_ptr2 + (94208 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp220 = tl.load(in_ptr28 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp221 = tl.load(in_ptr2 + (98304 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp229 = tl.load(in_ptr29 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp230 = tl.load(in_ptr2 + (102400 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp238 = tl.load(in_ptr30 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp239 = tl.load(in_ptr2 + (106496 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp247 = tl.load(in_ptr31 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp248 = tl.load(in_ptr2 + (110592 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp256 = tl.load(in_ptr32 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp257 = tl.load(in_ptr2 + (114688 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp2 = tmp0 - tmp1
        tmp5 = tmp3 - tmp4
        tmp6 = tl_math.exp(tmp5)
        tmp8 = tmp6 / tmp7
        tmp9 = tmp2 * tmp8
        tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
        tmp12 = _tmp11 + tmp10
        _tmp11 = tl.where(rmask & xmask, tmp12, _tmp11)
        tmp15 = tmp14 - tmp4
        tmp16 = tl_math.exp(tmp15)
        tmp17 = tmp16 / tmp7
        tmp18 = tmp13 * tmp17
        tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
        tmp21 = _tmp20 + tmp19
        _tmp20 = tl.where(rmask & xmask, tmp21, _tmp20)
        tmp24 = tmp23 - tmp4
        tmp25 = tl_math.exp(tmp24)
        tmp26 = tmp25 / tmp7
        tmp27 = tmp22 * tmp26
        tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK])
        tmp30 = _tmp29 + tmp28
        _tmp29 = tl.where(rmask & xmask, tmp30, _tmp29)
        tmp33 = tmp32 - tmp4
        tmp34 = tl_math.exp(tmp33)
        tmp35 = tmp34 / tmp7
        tmp36 = tmp31 * tmp35
        tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK])
        tmp39 = _tmp38 + tmp37
        _tmp38 = tl.where(rmask & xmask, tmp39, _tmp38)
        tmp42 = tmp41 - tmp4
        tmp43 = tl_math.exp(tmp42)
        tmp44 = tmp43 / tmp7
        tmp45 = tmp40 * tmp44
        tmp46 = tl.broadcast_to(tmp45, [XBLOCK, RBLOCK])
        tmp48 = _tmp47 + tmp46
        _tmp47 = tl.where(rmask & xmask, tmp48, _tmp47)
        tmp51 = tmp50 - tmp4
        tmp52 = tl_math.exp(tmp51)
        tmp53 = tmp52 / tmp7
        tmp54 = tmp49 * tmp53
        tmp55 = tl.broadcast_to(tmp54, [XBLOCK, RBLOCK])
        tmp57 = _tmp56 + tmp55
        _tmp56 = tl.where(rmask & xmask, tmp57, _tmp56)
        tmp60 = tmp59 - tmp4
        tmp61 = tl_math.exp(tmp60)
        tmp62 = tmp61 / tmp7
        tmp63 = tmp58 * tmp62
        tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK])
        tmp66 = _tmp65 + tmp64
        _tmp65 = tl.where(rmask & xmask, tmp66, _tmp65)
        tmp69 = tmp68 - tmp4
        tmp70 = tl_math.exp(tmp69)
        tmp71 = tmp70 / tmp7
        tmp72 = tmp67 * tmp71
        tmp73 = tl.broadcast_to(tmp72, [XBLOCK, RBLOCK])
        tmp75 = _tmp74 + tmp73
        _tmp74 = tl.where(rmask & xmask, tmp75, _tmp74)
        tmp78 = tmp77 - tmp4
        tmp79 = tl_math.exp(tmp78)
        tmp80 = tmp79 / tmp7
        tmp81 = tmp76 * tmp80
        tmp82 = tl.broadcast_to(tmp81, [XBLOCK, RBLOCK])
        tmp84 = _tmp83 + tmp82
        _tmp83 = tl.where(rmask & xmask, tmp84, _tmp83)
        tmp87 = tmp86 - tmp4
        tmp88 = tl_math.exp(tmp87)
        tmp89 = tmp88 / tmp7
        tmp90 = tmp85 * tmp89
        tmp91 = tl.broadcast_to(tmp90, [XBLOCK, RBLOCK])
        tmp93 = _tmp92 + tmp91
        _tmp92 = tl.where(rmask & xmask, tmp93, _tmp92)
        tmp96 = tmp95 - tmp4
        tmp97 = tl_math.exp(tmp96)
        tmp98 = tmp97 / tmp7
        tmp99 = tmp94 * tmp98
        tmp100 = tl.broadcast_to(tmp99, [XBLOCK, RBLOCK])
        tmp102 = _tmp101 + tmp100
        _tmp101 = tl.where(rmask & xmask, tmp102, _tmp101)
        tmp105 = tmp104 - tmp4
        tmp106 = tl_math.exp(tmp105)
        tmp107 = tmp106 / tmp7
        tmp108 = tmp103 * tmp107
        tmp109 = tl.broadcast_to(tmp108, [XBLOCK, RBLOCK])
        tmp111 = _tmp110 + tmp109
        _tmp110 = tl.where(rmask & xmask, tmp111, _tmp110)
        tmp114 = tmp113 - tmp4
        tmp115 = tl_math.exp(tmp114)
        tmp116 = tmp115 / tmp7
        tmp117 = tmp112 * tmp116
        tmp118 = tl.broadcast_to(tmp117, [XBLOCK, RBLOCK])
        tmp120 = _tmp119 + tmp118
        _tmp119 = tl.where(rmask & xmask, tmp120, _tmp119)
        tmp123 = tmp122 - tmp4
        tmp124 = tl_math.exp(tmp123)
        tmp125 = tmp124 / tmp7
        tmp126 = tmp121 * tmp125
        tmp127 = tl.broadcast_to(tmp126, [XBLOCK, RBLOCK])
        tmp129 = _tmp128 + tmp127
        _tmp128 = tl.where(rmask & xmask, tmp129, _tmp128)
        tmp132 = tmp131 - tmp4
        tmp133 = tl_math.exp(tmp132)
        tmp134 = tmp133 / tmp7
        tmp135 = tmp130 * tmp134
        tmp136 = tl.broadcast_to(tmp135, [XBLOCK, RBLOCK])
        tmp138 = _tmp137 + tmp136
        _tmp137 = tl.where(rmask & xmask, tmp138, _tmp137)
        tmp141 = tmp140 - tmp4
        tmp142 = tl_math.exp(tmp141)
        tmp143 = tmp142 / tmp7
        tmp144 = tmp139 * tmp143
        tmp145 = tl.broadcast_to(tmp144, [XBLOCK, RBLOCK])
        tmp147 = _tmp146 + tmp145
        _tmp146 = tl.where(rmask & xmask, tmp147, _tmp146)
        tmp150 = tmp149 - tmp4
        tmp151 = tl_math.exp(tmp150)
        tmp152 = tmp151 / tmp7
        tmp153 = tmp148 * tmp152
        tmp154 = tl.broadcast_to(tmp153, [XBLOCK, RBLOCK])
        tmp156 = _tmp155 + tmp154
        _tmp155 = tl.where(rmask & xmask, tmp156, _tmp155)
        tmp159 = tmp158 - tmp4
        tmp160 = tl_math.exp(tmp159)
        tmp161 = tmp160 / tmp7
        tmp162 = tmp157 * tmp161
        tmp163 = tl.broadcast_to(tmp162, [XBLOCK, RBLOCK])
        tmp165 = _tmp164 + tmp163
        _tmp164 = tl.where(rmask & xmask, tmp165, _tmp164)
        tmp168 = tmp167 - tmp4
        tmp169 = tl_math.exp(tmp168)
        tmp170 = tmp169 / tmp7
        tmp171 = tmp166 * tmp170
        tmp172 = tl.broadcast_to(tmp171, [XBLOCK, RBLOCK])
        tmp174 = _tmp173 + tmp172
        _tmp173 = tl.where(rmask & xmask, tmp174, _tmp173)
        tmp177 = tmp176 - tmp4
        tmp178 = tl_math.exp(tmp177)
        tmp179 = tmp178 / tmp7
        tmp180 = tmp175 * tmp179
        tmp181 = tl.broadcast_to(tmp180, [XBLOCK, RBLOCK])
        tmp183 = _tmp182 + tmp181
        _tmp182 = tl.where(rmask & xmask, tmp183, _tmp182)
        tmp186 = tmp185 - tmp4
        tmp187 = tl_math.exp(tmp186)
        tmp188 = tmp187 / tmp7
        tmp189 = tmp184 * tmp188
        tmp190 = tl.broadcast_to(tmp189, [XBLOCK, RBLOCK])
        tmp192 = _tmp191 + tmp190
        _tmp191 = tl.where(rmask & xmask, tmp192, _tmp191)
        tmp195 = tmp194 - tmp4
        tmp196 = tl_math.exp(tmp195)
        tmp197 = tmp196 / tmp7
        tmp198 = tmp193 * tmp197
        tmp199 = tl.broadcast_to(tmp198, [XBLOCK, RBLOCK])
        tmp201 = _tmp200 + tmp199
        _tmp200 = tl.where(rmask & xmask, tmp201, _tmp200)
        tmp204 = tmp203 - tmp4
        tmp205 = tl_math.exp(tmp204)
        tmp206 = tmp205 / tmp7
        tmp207 = tmp202 * tmp206
        tmp208 = tl.broadcast_to(tmp207, [XBLOCK, RBLOCK])
        tmp210 = _tmp209 + tmp208
        _tmp209 = tl.where(rmask & xmask, tmp210, _tmp209)
        tmp213 = tmp212 - tmp4
        tmp214 = tl_math.exp(tmp213)
        tmp215 = tmp214 / tmp7
        tmp216 = tmp211 * tmp215
        tmp217 = tl.broadcast_to(tmp216, [XBLOCK, RBLOCK])
        tmp219 = _tmp218 + tmp217
        _tmp218 = tl.where(rmask & xmask, tmp219, _tmp218)
        tmp222 = tmp221 - tmp4
        tmp223 = tl_math.exp(tmp222)
        tmp224 = tmp223 / tmp7
        tmp225 = tmp220 * tmp224
        tmp226 = tl.broadcast_to(tmp225, [XBLOCK, RBLOCK])
        tmp228 = _tmp227 + tmp226
        _tmp227 = tl.where(rmask & xmask, tmp228, _tmp227)
        tmp231 = tmp230 - tmp4
        tmp232 = tl_math.exp(tmp231)
        tmp233 = tmp232 / tmp7
        tmp234 = tmp229 * tmp233
        tmp235 = tl.broadcast_to(tmp234, [XBLOCK, RBLOCK])
        tmp237 = _tmp236 + tmp235
        _tmp236 = tl.where(rmask & xmask, tmp237, _tmp236)
        tmp240 = tmp239 - tmp4
        tmp241 = tl_math.exp(tmp240)
        tmp242 = tmp241 / tmp7
        tmp243 = tmp238 * tmp242
        tmp244 = tl.broadcast_to(tmp243, [XBLOCK, RBLOCK])
        tmp246 = _tmp245 + tmp244
        _tmp245 = tl.where(rmask & xmask, tmp246, _tmp245)
        tmp249 = tmp248 - tmp4
        tmp250 = tl_math.exp(tmp249)
        tmp251 = tmp250 / tmp7
        tmp252 = tmp247 * tmp251
        tmp253 = tl.broadcast_to(tmp252, [XBLOCK, RBLOCK])
        tmp255 = _tmp254 + tmp253
        _tmp254 = tl.where(rmask & xmask, tmp255, _tmp254)
        tmp258 = tmp257 - tmp4
        tmp259 = tl_math.exp(tmp258)
        tmp260 = tmp259 / tmp7
        tmp261 = tmp256 * tmp260
        tmp262 = tl.broadcast_to(tmp261, [XBLOCK, RBLOCK])
        tmp264 = _tmp263 + tmp262
        _tmp263 = tl.where(rmask & xmask, tmp264, _tmp263)
    tmp11 = tl.sum(_tmp11, 1)[:, None]
    tl.store(out_ptr0 + x3, tmp11, xmask)
    tmp20 = tl.sum(_tmp20, 1)[:, None]
    tl.store(out_ptr1 + x3, tmp20, xmask)
    tmp29 = tl.sum(_tmp29, 1)[:, None]
    tl.store(out_ptr2 + x3, tmp29, xmask)
    tmp38 = tl.sum(_tmp38, 1)[:, None]
    tl.store(out_ptr3 + x3, tmp38, xmask)
    tmp47 = tl.sum(_tmp47, 1)[:, None]
    tl.store(out_ptr4 + x3, tmp47, xmask)
    tmp56 = tl.sum(_tmp56, 1)[:, None]
    tl.store(out_ptr5 + x3, tmp56, xmask)
    tmp65 = tl.sum(_tmp65, 1)[:, None]
    tl.store(out_ptr6 + x3, tmp65, xmask)
    tmp74 = tl.sum(_tmp74, 1)[:, None]
    tl.store(out_ptr7 + x3, tmp74, xmask)
    tmp83 = tl.sum(_tmp83, 1)[:, None]
    tl.store(out_ptr8 + x3, tmp83, xmask)
    tmp92 = tl.sum(_tmp92, 1)[:, None]
    tl.store(out_ptr9 + x3, tmp92, xmask)
    tmp101 = tl.sum(_tmp101, 1)[:, None]
    tl.store(out_ptr10 + x3, tmp101, xmask)
    tmp110 = tl.sum(_tmp110, 1)[:, None]
    tl.store(out_ptr11 + x3, tmp110, xmask)
    tmp119 = tl.sum(_tmp119, 1)[:, None]
    tl.store(out_ptr12 + x3, tmp119, xmask)
    tmp128 = tl.sum(_tmp128, 1)[:, None]
    tl.store(out_ptr13 + x3, tmp128, xmask)
    tmp137 = tl.sum(_tmp137, 1)[:, None]
    tl.store(out_ptr14 + x3, tmp137, xmask)
    tmp146 = tl.sum(_tmp146, 1)[:, None]
    tl.store(out_ptr15 + x3, tmp146, xmask)
    tmp155 = tl.sum(_tmp155, 1)[:, None]
    tl.store(out_ptr16 + x3, tmp155, xmask)
    tmp164 = tl.sum(_tmp164, 1)[:, None]
    tl.store(out_ptr17 + x3, tmp164, xmask)
    tmp173 = tl.sum(_tmp173, 1)[:, None]
    tl.store(out_ptr18 + x3, tmp173, xmask)
    tmp182 = tl.sum(_tmp182, 1)[:, None]
    tl.store(out_ptr19 + x3, tmp182, xmask)
    tmp191 = tl.sum(_tmp191, 1)[:, None]
    tl.store(out_ptr20 + x3, tmp191, xmask)
    tmp200 = tl.sum(_tmp200, 1)[:, None]
    tl.store(out_ptr21 + x3, tmp200, xmask)
    tmp209 = tl.sum(_tmp209, 1)[:, None]
    tl.store(out_ptr22 + x3, tmp209, xmask)
    tmp218 = tl.sum(_tmp218, 1)[:, None]
    tl.store(out_ptr23 + x3, tmp218, xmask)
    tmp227 = tl.sum(_tmp227, 1)[:, None]
    tl.store(out_ptr24 + x3, tmp227, xmask)
    tmp236 = tl.sum(_tmp236, 1)[:, None]
    tl.store(out_ptr25 + x3, tmp236, xmask)
    tmp245 = tl.sum(_tmp245, 1)[:, None]
    tl.store(out_ptr26 + x3, tmp245, xmask)
    tmp254 = tl.sum(_tmp254, 1)[:, None]
    tl.store(out_ptr27 + x3, tmp254, xmask)
    tmp263 = tl.sum(_tmp263, 1)[:, None]
    tl.store(out_ptr28 + x3, tmp263, xmask)
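

# NOTE (hand-added annotation): the reduction above multiplies each residual
# by its softmax assignment weight and sums over all 4096 positions, yielding
# one 128-dim aggregated descriptor per cluster for the first chunk of
# clusters. Assumed-shape eager sketch for a single chunk:
def _sketch_weighted_residual_sum(residuals, logits, m, s):
    # residuals: (B, C, D, N); logits: (B, C, N); m, s: (B, 1, N)
    w = (logits - m).exp() / s                       # soft-assignment weights
    return (residuals * w.unsqueeze(2)).sum(dim=-1)  # (B, C, D)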


@triton.jit
def triton_red_fused_mul_sum_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
    in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11,
    in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18,
    in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25,
    in_ptr26, in_ptr27, in_ptr28, in_ptr29, in_ptr30, out_ptr0, out_ptr1,
    out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8,
    out_ptr9, out_ptr10, out_ptr11, out_ptr12, out_ptr13, out_ptr14,
    out_ptr15, out_ptr16, out_ptr17, out_ptr18, out_ptr19, out_ptr20,
    out_ptr21, out_ptr22, out_ptr23, out_ptr24, out_ptr25, out_ptr26,
    out_ptr27, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    xnumel = 512
    rnumel = 4096
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x3 = xindex
    x1 = xindex // 128
    _tmp9 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp18 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp27 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp36 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp45 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp54 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp63 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp72 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp81 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp90 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp99 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp108 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp117 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp126 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp135 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp144 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp153 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp162 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp171 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp180 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp189 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp198 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp207 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp216 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp225 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp234 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp243 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp252 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex
        tmp0 = tl.load(in_ptr0 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp1 = tl.load(in_ptr1 + (118784 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp2 = tl.load(in_ptr2 + (r2 + 4096 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp5 = tl.load(in_ptr3 + (r2 + 4096 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp11 = tl.load(in_ptr4 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp12 = tl.load(in_ptr1 + (122880 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp20 = tl.load(in_ptr5 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp21 = tl.load(in_ptr1 + (126976 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp29 = tl.load(in_ptr6 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp30 = tl.load(in_ptr1 + (131072 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp38 = tl.load(in_ptr7 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp39 = tl.load(in_ptr1 + (135168 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp47 = tl.load(in_ptr8 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp48 = tl.load(in_ptr1 + (139264 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp56 = tl.load(in_ptr9 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp57 = tl.load(in_ptr1 + (143360 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp65 = tl.load(in_ptr10 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp66 = tl.load(in_ptr1 + (147456 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp74 = tl.load(in_ptr11 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp75 = tl.load(in_ptr1 + (151552 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp83 = tl.load(in_ptr12 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp84 = tl.load(in_ptr1 + (155648 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp92 = tl.load(in_ptr13 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp93 = tl.load(in_ptr1 + (159744 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp101 = tl.load(in_ptr14 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp102 = tl.load(in_ptr1 + (163840 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp110 = tl.load(in_ptr15 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp111 = tl.load(in_ptr1 + (167936 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp119 = tl.load(in_ptr16 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp120 = tl.load(in_ptr1 + (172032 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp128 = tl.load(in_ptr17 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp129 = tl.load(in_ptr1 + (176128 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp137 = tl.load(in_ptr18 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp138 = tl.load(in_ptr1 + (180224 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp146 = tl.load(in_ptr19 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp147 = tl.load(in_ptr1 + (184320 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp155 = tl.load(in_ptr20 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp156 = tl.load(in_ptr1 + (188416 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp164 = tl.load(in_ptr21 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp165 = tl.load(in_ptr1 + (192512 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp173 = tl.load(in_ptr22 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp174 = tl.load(in_ptr1 + (196608 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp182 = tl.load(in_ptr23 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp183 = tl.load(in_ptr1 + (200704 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp191 = tl.load(in_ptr24 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp192 = tl.load(in_ptr1 + (204800 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp200 = tl.load(in_ptr25 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp201 = tl.load(in_ptr1 + (208896 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp209 = tl.load(in_ptr26 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp210 = tl.load(in_ptr1 + (212992 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp218 = tl.load(in_ptr27 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp219 = tl.load(in_ptr1 + (217088 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp227 = tl.load(in_ptr28 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp228 = tl.load(in_ptr1 + (221184 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp236 = tl.load(in_ptr29 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp237 = tl.load(in_ptr1 + (225280 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp245 = tl.load(in_ptr30 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp246 = tl.load(in_ptr1 + (229376 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp3 = tmp1 - tmp2
        tmp4 = tl_math.exp(tmp3)
        tmp6 = tmp4 / tmp5
        tmp7 = tmp0 * tmp6
        tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
        tmp10 = _tmp9 + tmp8
        _tmp9 = tl.where(rmask & xmask, tmp10, _tmp9)
        tmp13 = tmp12 - tmp2
        tmp14 = tl_math.exp(tmp13)
        tmp15 = tmp14 / tmp5
        tmp16 = tmp11 * tmp15
        tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
        tmp19 = _tmp18 + tmp17
        _tmp18 = tl.where(rmask & xmask, tmp19, _tmp18)
        tmp22 = tmp21 - tmp2
        tmp23 = tl_math.exp(tmp22)
        tmp24 = tmp23 / tmp5
        tmp25 = tmp20 * tmp24
        tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
        tmp28 = _tmp27 + tmp26
        _tmp27 = tl.where(rmask & xmask, tmp28, _tmp27)
        tmp31 = tmp30 - tmp2
        tmp32 = tl_math.exp(tmp31)
        tmp33 = tmp32 / tmp5
        tmp34 = tmp29 * tmp33
        tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK])
        tmp37 = _tmp36 + tmp35
        _tmp36 = tl.where(rmask & xmask, tmp37, _tmp36)
        tmp40 = tmp39 - tmp2
        tmp41 = tl_math.exp(tmp40)
        tmp42 = tmp41 / tmp5
        tmp43 = tmp38 * tmp42
        tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK])
        tmp46 = _tmp45 + tmp44
        _tmp45 = tl.where(rmask & xmask, tmp46, _tmp45)
        tmp49 = tmp48 - tmp2
        tmp50 = tl_math.exp(tmp49)
        tmp51 = tmp50 / tmp5
        tmp52 = tmp47 * tmp51
        tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK])
        tmp55 = _tmp54 + tmp53
        _tmp54 = tl.where(rmask & xmask, tmp55, _tmp54)
        tmp58 = tmp57 - tmp2
        tmp59 = tl_math.exp(tmp58)
        tmp60 = tmp59 / tmp5
        tmp61 = tmp56 * tmp60
        tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK])
        tmp64 = _tmp63 + tmp62
        _tmp63 = tl.where(rmask & xmask, tmp64, _tmp63)
        tmp67 = tmp66 - tmp2
        tmp68 = tl_math.exp(tmp67)
        tmp69 = tmp68 / tmp5
        tmp70 = tmp65 * tmp69
        tmp71 = tl.broadcast_to(tmp70, [XBLOCK, RBLOCK])
        tmp73 = _tmp72 + tmp71
        _tmp72 = tl.where(rmask & xmask, tmp73, _tmp72)
        tmp76 = tmp75 - tmp2
        tmp77 = tl_math.exp(tmp76)
        tmp78 = tmp77 / tmp5
        tmp79 = tmp74 * tmp78
        tmp80 = tl.broadcast_to(tmp79, [XBLOCK, RBLOCK])
        tmp82 = _tmp81 + tmp80
        _tmp81 = tl.where(rmask & xmask, tmp82, _tmp81)
        tmp85 = tmp84 - tmp2
        tmp86 = tl_math.exp(tmp85)
        tmp87 = tmp86 / tmp5
        tmp88 = tmp83 * tmp87
        tmp89 = tl.broadcast_to(tmp88, [XBLOCK, RBLOCK])
        tmp91 = _tmp90 + tmp89
        _tmp90 = tl.where(rmask & xmask, tmp91, _tmp90)
        tmp94 = tmp93 - tmp2
        tmp95 = tl_math.exp(tmp94)
        tmp96 = tmp95 / tmp5
        tmp97 = tmp92 * tmp96
        tmp98 = tl.broadcast_to(tmp97, [XBLOCK, RBLOCK])
        tmp100 = _tmp99 + tmp98
        _tmp99 = tl.where(rmask & xmask, tmp100, _tmp99)
        tmp103 = tmp102 - tmp2
        tmp104 = tl_math.exp(tmp103)
        tmp105 = tmp104 / tmp5
        tmp106 = tmp101 * tmp105
        tmp107 = tl.broadcast_to(tmp106, [XBLOCK, RBLOCK])
        tmp109 = _tmp108 + tmp107
        _tmp108 = tl.where(rmask & xmask, tmp109, _tmp108)
        tmp112 = tmp111 - tmp2
        tmp113 = tl_math.exp(tmp112)
        tmp114 = tmp113 / tmp5
        tmp115 = tmp110 * tmp114
        tmp116 = tl.broadcast_to(tmp115, [XBLOCK, RBLOCK])
        tmp118 = _tmp117 + tmp116
        _tmp117 = tl.where(rmask & xmask, tmp118, _tmp117)
        tmp121 = tmp120 - tmp2
        tmp122 = tl_math.exp(tmp121)
        tmp123 = tmp122 / tmp5
        tmp124 = tmp119 * tmp123
        tmp125 = tl.broadcast_to(tmp124, [XBLOCK, RBLOCK])
        tmp127 = _tmp126 + tmp125
        _tmp126 = tl.where(rmask & xmask, tmp127, _tmp126)
        tmp130 = tmp129 - tmp2
        tmp131 = tl_math.exp(tmp130)
        tmp132 = tmp131 / tmp5
        tmp133 = tmp128 * tmp132
        tmp134 = tl.broadcast_to(tmp133, [XBLOCK, RBLOCK])
        tmp136 = _tmp135 + tmp134
        _tmp135 = tl.where(rmask & xmask, tmp136, _tmp135)
        tmp139 = tmp138 - tmp2
        tmp140 = tl_math.exp(tmp139)
        tmp141 = tmp140 / tmp5
        tmp142 = tmp137 * tmp141
        tmp143 = tl.broadcast_to(tmp142, [XBLOCK, RBLOCK])
        tmp145 = _tmp144 + tmp143
        _tmp144 = tl.where(rmask & xmask, tmp145, _tmp144)
        tmp148 = tmp147 - tmp2
        tmp149 = tl_math.exp(tmp148)
        tmp150 = tmp149 / tmp5
        tmp151 = tmp146 * tmp150
        tmp152 = tl.broadcast_to(tmp151, [XBLOCK, RBLOCK])
        tmp154 = _tmp153 + tmp152
        _tmp153 = tl.where(rmask & xmask, tmp154, _tmp153)
        tmp157 = tmp156 - tmp2
        tmp158 = tl_math.exp(tmp157)
        tmp159 = tmp158 / tmp5
        tmp160 = tmp155 * tmp159
        tmp161 = tl.broadcast_to(tmp160, [XBLOCK, RBLOCK])
        tmp163 = _tmp162 + tmp161
        _tmp162 = tl.where(rmask & xmask, tmp163, _tmp162)
        tmp166 = tmp165 - tmp2
        tmp167 = tl_math.exp(tmp166)
        tmp168 = tmp167 / tmp5
        tmp169 = tmp164 * tmp168
        tmp170 = tl.broadcast_to(tmp169, [XBLOCK, RBLOCK])
        tmp172 = _tmp171 + tmp170
        _tmp171 = tl.where(rmask & xmask, tmp172, _tmp171)
        tmp175 = tmp174 - tmp2
        tmp176 = tl_math.exp(tmp175)
        tmp177 = tmp176 / tmp5
        tmp178 = tmp173 * tmp177
        tmp179 = tl.broadcast_to(tmp178, [XBLOCK, RBLOCK])
        tmp181 = _tmp180 + tmp179
        _tmp180 = tl.where(rmask & xmask, tmp181, _tmp180)
        tmp184 = tmp183 - tmp2
        tmp185 = tl_math.exp(tmp184)
        tmp186 = tmp185 / tmp5
        tmp187 = tmp182 * tmp186
        tmp188 = tl.broadcast_to(tmp187, [XBLOCK, RBLOCK])
        tmp190 = _tmp189 + tmp188
        _tmp189 = tl.where(rmask & xmask, tmp190, _tmp189)
        tmp193 = tmp192 - tmp2
        tmp194 = tl_math.exp(tmp193)
        tmp195 = tmp194 / tmp5
        tmp196 = tmp191 * tmp195
        tmp197 = tl.broadcast_to(tmp196, [XBLOCK, RBLOCK])
        tmp199 = _tmp198 + tmp197
        _tmp198 = tl.where(rmask & xmask, tmp199, _tmp198)
        tmp202 = tmp201 - tmp2
        tmp203 = tl_math.exp(tmp202)
        tmp204 = tmp203 / tmp5
        tmp205 = tmp200 * tmp204
        tmp206 = tl.broadcast_to(tmp205, [XBLOCK, RBLOCK])
        tmp208 = _tmp207 + tmp206
        _tmp207 = tl.where(rmask & xmask, tmp208, _tmp207)
        tmp211 = tmp210 - tmp2
        tmp212 = tl_math.exp(tmp211)
        tmp213 = tmp212 / tmp5
        tmp214 = tmp209 * tmp213
        tmp215 = tl.broadcast_to(tmp214, [XBLOCK, RBLOCK])
        tmp217 = _tmp216 + tmp215
        _tmp216 = tl.where(rmask & xmask, tmp217, _tmp216)
        tmp220 = tmp219 - tmp2
        tmp221 = tl_math.exp(tmp220)
        tmp222 = tmp221 / tmp5
        tmp223 = tmp218 * tmp222
        tmp224 = tl.broadcast_to(tmp223, [XBLOCK, RBLOCK])
        tmp226 = _tmp225 + tmp224
        _tmp225 = tl.where(rmask & xmask, tmp226, _tmp225)
        tmp229 = tmp228 - tmp2
        tmp230 = tl_math.exp(tmp229)
        tmp231 = tmp230 / tmp5
        tmp232 = tmp227 * tmp231
        tmp233 = tl.broadcast_to(tmp232, [XBLOCK, RBLOCK])
        tmp235 = _tmp234 + tmp233
        _tmp234 = tl.where(rmask & xmask, tmp235, _tmp234)
        tmp238 = tmp237 - tmp2
        tmp239 = tl_math.exp(tmp238)
        tmp240 = tmp239 / tmp5
        tmp241 = tmp236 * tmp240
        tmp242 = tl.broadcast_to(tmp241, [XBLOCK, RBLOCK])
        tmp244 = _tmp243 + tmp242
        _tmp243 = tl.where(rmask & xmask, tmp244, _tmp243)
        tmp247 = tmp246 - tmp2
        tmp248 = tl_math.exp(tmp247)
        tmp249 = tmp248 / tmp5
        tmp250 = tmp245 * tmp249
        tmp251 = tl.broadcast_to(tmp250, [XBLOCK, RBLOCK])
        tmp253 = _tmp252 + tmp251
        _tmp252 = tl.where(rmask & xmask, tmp253, _tmp252)
    tmp9 = tl.sum(_tmp9, 1)[:, None]
    tl.store(out_ptr0 + x3, tmp9, xmask)
    tmp18 = tl.sum(_tmp18, 1)[:, None]
    tl.store(out_ptr1 + x3, tmp18, xmask)
    tmp27 = tl.sum(_tmp27, 1)[:, None]
    tl.store(out_ptr2 + x3, tmp27, xmask)
    tmp36 = tl.sum(_tmp36, 1)[:, None]
    tl.store(out_ptr3 + x3, tmp36, xmask)
    tmp45 = tl.sum(_tmp45, 1)[:, None]
    tl.store(out_ptr4 + x3, tmp45, xmask)
    tmp54 = tl.sum(_tmp54, 1)[:, None]
    tl.store(out_ptr5 + x3, tmp54, xmask)
    tmp63 = tl.sum(_tmp63, 1)[:, None]
    tl.store(out_ptr6 + x3, tmp63, xmask)
    tmp72 = tl.sum(_tmp72, 1)[:, None]
    tl.store(out_ptr7 + x3, tmp72, xmask)
    tmp81 = tl.sum(_tmp81, 1)[:, None]
    tl.store(out_ptr8 + x3, tmp81, xmask)
    tmp90 = tl.sum(_tmp90, 1)[:, None]
    tl.store(out_ptr9 + x3, tmp90, xmask)
    tmp99 = tl.sum(_tmp99, 1)[:, None]
    tl.store(out_ptr10 + x3, tmp99, xmask)
    tmp108 = tl.sum(_tmp108, 1)[:, None]
    tl.store(out_ptr11 + x3, tmp108, xmask)
    tmp117 = tl.sum(_tmp117, 1)[:, None]
    tl.store(out_ptr12 + x3, tmp117, xmask)
    tmp126 = tl.sum(_tmp126, 1)[:, None]
    tl.store(out_ptr13 + x3, tmp126, xmask)
    tmp135 = tl.sum(_tmp135, 1)[:, None]
    tl.store(out_ptr14 + x3, tmp135, xmask)
    tmp144 = tl.sum(_tmp144, 1)[:, None]
    tl.store(out_ptr15 + x3, tmp144, xmask)
    tmp153 = tl.sum(_tmp153, 1)[:, None]
    tl.store(out_ptr16 + x3, tmp153, xmask)
    tmp162 = tl.sum(_tmp162, 1)[:, None]
    tl.store(out_ptr17 + x3, tmp162, xmask)
    tmp171 = tl.sum(_tmp171, 1)[:, None]
    tl.store(out_ptr18 + x3, tmp171, xmask)
    tmp180 = tl.sum(_tmp180, 1)[:, None]
    tl.store(out_ptr19 + x3, tmp180, xmask)
    tmp189 = tl.sum(_tmp189, 1)[:, None]
    tl.store(out_ptr20 + x3, tmp189, xmask)
    tmp198 = tl.sum(_tmp198, 1)[:, None]
    tl.store(out_ptr21 + x3, tmp198, xmask)
    tmp207 = tl.sum(_tmp207, 1)[:, None]
    tl.store(out_ptr22 + x3, tmp207, xmask)
    tmp216 = tl.sum(_tmp216, 1)[:, None]
    tl.store(out_ptr23 + x3, tmp216, xmask)
    tmp225 = tl.sum(_tmp225, 1)[:, None]
    tl.store(out_ptr24 + x3, tmp225, xmask)
    tmp234 = tl.sum(_tmp234, 1)[:, None]
    tl.store(out_ptr25 + x3, tmp234, xmask)
    tmp243 = tl.sum(_tmp243, 1)[:, None]
    tl.store(out_ptr26 + x3, tmp243, xmask)
    tmp252 = tl.sum(_tmp252, 1)[:, None]
    tl.store(out_ptr27 + x3, tmp252, xmask)
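

# NOTE (hand-added annotation): the kernel below repeats the same
# weighted-residual reduction for the final chunk of clusters; the compiler
# has simply split the 64 clusters across fused kernels 3, 4 and 5
# (29 + 28 + 7), which is why the assignment offsets keep advancing in
# steps of 4096.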


@triton.jit
def triton_red_fused_mul_sum_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
    in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, out_ptr0, out_ptr1,
    out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, xnumel, rnumel,
    XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    xnumel = 512
    rnumel = 4096
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    x3 = xindex
    x1 = xindex // 128
    _tmp9 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp18 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp27 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp36 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp45 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp54 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    _tmp63 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        r2 = rindex
        tmp0 = tl.load(in_ptr0 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp1 = tl.load(in_ptr1 + (233472 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp2 = tl.load(in_ptr2 + (r2 + 4096 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp5 = tl.load(in_ptr3 + (r2 + 4096 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp11 = tl.load(in_ptr4 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp12 = tl.load(in_ptr1 + (237568 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp20 = tl.load(in_ptr5 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp21 = tl.load(in_ptr1 + (241664 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp29 = tl.load(in_ptr6 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp30 = tl.load(in_ptr1 + (245760 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp38 = tl.load(in_ptr7 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp39 = tl.load(in_ptr1 + (249856 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp47 = tl.load(in_ptr8 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp48 = tl.load(in_ptr1 + (253952 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp56 = tl.load(in_ptr9 + (r2 + 4096 * x3), rmask & xmask, eviction_policy='evict_first', other=0.0)
        tmp57 = tl.load(in_ptr1 + (258048 + r2 + 262144 * x1), rmask & xmask, eviction_policy='evict_last', other=0.0)
        tmp3 = tmp1 - tmp2
        tmp4 = tl_math.exp(tmp3)
        tmp6 = tmp4 / tmp5
        tmp7 = tmp0 * tmp6
        tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
        tmp10 = _tmp9 + tmp8
        _tmp9 = tl.where(rmask & xmask, tmp10, _tmp9)
        tmp13 = tmp12 - tmp2
        tmp14 = tl_math.exp(tmp13)
        tmp15 = tmp14 / tmp5
        tmp16 = tmp11 * tmp15
        tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
        tmp19 = _tmp18 + tmp17
        _tmp18 = tl.where(rmask & xmask, tmp19, _tmp18)
        tmp22 = tmp21 - tmp2
        tmp23 = tl_math.exp(tmp22)
        tmp24 = tmp23 / tmp5
        tmp25 = tmp20 * tmp24
        tmp26 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
        tmp28 = _tmp27 + tmp26
        _tmp27 = tl.where(rmask & xmask, tmp28, _tmp27)
        tmp31 = tmp30 - tmp2
        tmp32 = tl_math.exp(tmp31)
        tmp33 = tmp32 / tmp5
        tmp34 = tmp29 * tmp33
        tmp35 = tl.broadcast_to(tmp34, [XBLOCK, RBLOCK])
        tmp37 = _tmp36 + tmp35
        _tmp36 = tl.where(rmask & xmask, tmp37, _tmp36)
        tmp40 = tmp39 - tmp2
        tmp41 = tl_math.exp(tmp40)
        tmp42 = tmp41 / tmp5
        tmp43 = tmp38 * tmp42
        tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK])
        tmp46 = _tmp45 + tmp44
        _tmp45 = tl.where(rmask & xmask, tmp46, _tmp45)
        tmp49 = tmp48 - tmp2
        tmp50 = tl_math.exp(tmp49)
        tmp51 = tmp50 / tmp5
        tmp52 = tmp47 * tmp51
        tmp53 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK])
        tmp55 = _tmp54 + tmp53
        _tmp54 = tl.where(rmask & xmask, tmp55, _tmp54)
        tmp58 = tmp57 - tmp2
        tmp59 = tl_math.exp(tmp58)
        tmp60 = tmp59 / tmp5
        tmp61 = tmp56 * tmp60
        tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK])
        tmp64 = _tmp63 + tmp62
        _tmp63 = tl.where(rmask & xmask, tmp64, _tmp63)
    tmp9 = tl.sum(_tmp9, 1)[:, None]
    tl.store(out_ptr0 + x3, tmp9, xmask)
    tmp18 = tl.sum(_tmp18, 1)[:, None]
    tl.store(out_ptr1 + x3, tmp18, xmask)
    tmp27 = tl.sum(_tmp27, 1)[:, None]
    tl.store(out_ptr2 + x3, tmp27, xmask)
    tmp36 = tl.sum(_tmp36, 1)[:, None]
    tl.store(out_ptr3 + x3, tmp36, xmask)
    tmp45 = tl.sum(_tmp45, 1)[:, None]
    tl.store(out_ptr4 + x3, tmp45, xmask)
    tmp54 = tl.sum(_tmp54, 1)[:, None]
    tl.store(out_ptr5 + x3, tmp54, xmask)
    tmp63 = tl.sum(_tmp63, 1)[:, None]
    tl.store(out_ptr6 + x3, tmp63, xmask)
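

# NOTE (hand-added annotation): taken together, the three chunked reductions
# are equivalent to one einsum over soft weights and residuals. A compact
# eager sketch with assumed shapes (B=4, K=64, D=128, N=4096):
def _sketch_full_aggregation(residuals, assign):
    # residuals: (B, K, D, N); assign: (B, K, N), already softmax-normalized
    return torch.einsum('bkdn,bkn->bkd', residuals, assign)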


@triton.jit
def triton_per_fused_copy_linalg_vector_norm_zeros_6(in_out_ptr0,
    in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
    in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12,
    in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19,
    in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26,
    in_ptr27, in_ptr28, in_ptr29, in_ptr30, in_ptr31, in_ptr32, in_ptr33,
    in_ptr34, in_ptr35, in_ptr36, in_ptr37, in_ptr38, in_ptr39, in_ptr40,
    in_ptr41, in_ptr42, in_ptr43, in_ptr44, in_ptr45, in_ptr46, in_ptr47,
    in_ptr48, in_ptr49, in_ptr50, in_ptr51, in_ptr52, in_ptr53, in_ptr54,
    in_ptr55, in_ptr56, in_ptr57, in_ptr58, in_ptr59, in_ptr60, in_ptr61,
    in_ptr62, in_ptr63, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    RBLOCK: tl.constexpr = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    x0 = xindex % 64
    r2 = rindex
    x1 = xindex // 64
    x3 = xindex
    tmp0 = x0
    tmp1 = tl.full([1, 1], 4, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1, 1], 5, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = tl.load(in_ptr0 + (r2 + 128 * x1), tmp5 & xmask, eviction_policy='evict_last', other=0.0)
    tmp7 = tl.full([1, 1], 3, tl.int64)
    tmp8 = tmp0 >= tmp7
    tmp9 = tmp0 < tmp1
    tmp10 = tmp8 & tmp9
    tmp11 = tl.load(in_ptr1 + (r2 + 128 * x1), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
    tmp12 = tl.full([1, 1], 2, tl.int64)
    tmp13 = tmp0 >= tmp12
    tmp14 = tmp0 < tmp7
    tmp15 = tmp13 & tmp14
    tmp16 = tl.load(in_ptr2 + (r2 + 128 * x1), tmp15 & xmask, eviction_policy='evict_last', other=0.0)
    tmp17 = tl.full([1, 1], 1, tl.int64)
    tmp18 = tmp0 >= tmp17
    tmp19 = tmp0 < tmp12
    tmp20 = tmp18 & tmp19
    tmp21 = tl.load(in_ptr3 + (r2 + 128 * x1), tmp20 & xmask, eviction_policy='evict_last', other=0.0)
    tmp22 = tmp0 < tmp17
    tmp23 = tl.load(in_ptr4 + (r2 + 128 * x1), tmp22 & xmask, eviction_policy='evict_last', other=0.0)
    tmp24 = 0.0
    tmp25 = tl.where(tmp22, tmp23, tmp24)
    tmp26 = tl.where(tmp20, tmp21, tmp25)
    tmp27 = tl.where(tmp15, tmp16, tmp26)
    tmp28 = tl.where(tmp10, tmp11, tmp27)
    tmp29 = tl.where(tmp5, tmp6, tmp28)
    tmp30 = tl.full([1, 1], 8, tl.int64)
    tmp31 = tmp0 >= tmp30
    tmp32 = tl.full([1, 1], 9, tl.int64)
    tmp33 = tmp0 < tmp32
    tmp34 = tmp31 & tmp33
    tmp35 = tl.load(in_ptr5 + (r2 + 128 * x1), tmp34 & xmask, eviction_policy='evict_last', other=0.0)
    tmp36 = tl.full([1, 1], 7, tl.int64)
    tmp37 = tmp0 >= tmp36
    tmp38 = tmp0 < tmp30
    tmp39 = tmp37 & tmp38
    tmp40 = tl.load(in_ptr6 + (r2 + 128 * x1), tmp39 & xmask, eviction_policy='evict_last', other=0.0)
    tmp41 = tl.full([1, 1], 6, tl.int64)
    tmp42 = tmp0 >= tmp41
    tmp43 = tmp0 < tmp36
    tmp44 = tmp42 & tmp43
    tmp45 = tl.load(in_ptr7 + (r2 + 128 * x1), tmp44 & xmask, eviction_policy='evict_last', other=0.0)
    tmp46 = tmp0 >= tmp3
    tmp47 = tmp0 < tmp41
    tmp48 = tmp46 & tmp47
    tmp49 = tl.load(in_ptr8 + (r2 + 128 * x1), tmp48 & xmask, eviction_policy='evict_last', other=0.0)
    tmp50 = tl.where(tmp48, tmp49, tmp29)
    tmp51 = tl.where(tmp44, tmp45, tmp50)
    tmp52 = tl.where(tmp39, tmp40, tmp51)
    tmp53 = tl.where(tmp34, tmp35, tmp52)
    tmp54 = tl.full([1, 1], 12, tl.int64)
    tmp55 = tmp0 >= tmp54
    tmp56 = tl.full([1, 1], 13, tl.int64)
    tmp57 = tmp0 < tmp56
    tmp58 = tmp55 & tmp57
    tmp59 = tl.load(in_ptr9 + (r2 + 128 * x1), tmp58 & xmask, eviction_policy='evict_last', other=0.0)
    tmp60 = tl.full([1, 1], 11, tl.int64)
    tmp61 = tmp0 >= tmp60
    tmp62 = tmp0 < tmp54
    tmp63 = tmp61 & tmp62
    tmp64 = tl.load(in_ptr10 + (r2 + 128 * x1), tmp63 & xmask, eviction_policy='evict_last', other=0.0)
    tmp65 = tl.full([1, 1], 10, tl.int64)
    tmp66 = tmp0 >= tmp65
    tmp67 = tmp0 < tmp60
    tmp68 = tmp66 & tmp67
    tmp69 = tl.load(in_ptr11 + (r2 + 128 * x1), tmp68 & xmask, eviction_policy='evict_last', other=0.0)
    tmp70 = tmp0 >= tmp32
    tmp71 = tmp0 < tmp65
    tmp72 = tmp70 & tmp71
    tmp73 = tl.load(in_ptr12 + (r2 + 128 * x1), tmp72 & xmask, eviction_policy='evict_last', other=0.0)
    tmp74 = tl.where(tmp72, tmp73, tmp53)
    tmp75 = tl.where(tmp68, tmp69, tmp74)
    tmp76 = tl.where(tmp63, tmp64, tmp75)
    tmp77 = tl.where(tmp58, tmp59, tmp76)
    tmp78 = tl.full([1, 1], 16, tl.int64)
    tmp79 = tmp0 >= tmp78
    tmp80 = tl.full([1, 1], 17, tl.int64)
    tmp81 = tmp0 < tmp80
    tmp82 = tmp79 & tmp81
    tmp83 = tl.load(in_ptr13 + (r2 + 128 * x1), tmp82 & xmask, eviction_policy='evict_last', other=0.0)
    tmp84 = tl.full([1, 1], 15, tl.int64)
    tmp85 = tmp0 >= tmp84
    tmp86 = tmp0 < tmp78
    tmp87 = tmp85 & tmp86
    tmp88 = tl.load(in_ptr14 + (r2 + 128 * x1), tmp87 & xmask, eviction_policy='evict_last', other=0.0)
    tmp89 = tl.full([1, 1], 14, tl.int64)
    tmp90 = tmp0 >= tmp89
    tmp91 = tmp0 < tmp84
    tmp92 = tmp90 & tmp91
    tmp93 = tl.load(in_ptr15 + (r2 + 128 * x1), tmp92 & xmask, eviction_policy='evict_last', other=0.0)
    tmp94 = tmp0 >= tmp56
    tmp95 = tmp0 < tmp89
    tmp96 = tmp94 & tmp95
    tmp97 = tl.load(in_ptr16 + (r2 + 128 * x1), tmp96 & xmask, eviction_policy='evict_last', other=0.0)
    tmp98 = tl.where(tmp96, tmp97, tmp77)
    tmp99 = tl.where(tmp92, tmp93, tmp98)
    tmp100 = tl.where(tmp87, tmp88, tmp99)
    tmp101 = tl.where(tmp82, tmp83, tmp100)
    tmp102 = tl.full([1, 1], 20, tl.int64)
    tmp103 = tmp0 >= tmp102
    tmp104 = tl.full([1, 1], 21, tl.int64)
    tmp105 = tmp0 < tmp104
    tmp106 = tmp103 & tmp105
    tmp107 = tl.load(in_ptr17 + (r2 + 128 * x1), tmp106 & xmask, eviction_policy='evict_last', other=0.0)
    tmp108 = tl.full([1, 1], 19, tl.int64)
    tmp109 = tmp0 >= tmp108
    tmp110 = tmp0 < tmp102
    tmp111 = tmp109 & tmp110
    tmp112 = tl.load(in_ptr18 + (r2 + 128 * x1), tmp111 & xmask, eviction_policy='evict_last', other=0.0)
    tmp113 = tl.full([1, 1], 18, tl.int64)
    tmp114 = tmp0 >= tmp113
    tmp115 = tmp0 < tmp108
    tmp116 = tmp114 & tmp115
    tmp117 = tl.load(in_ptr19 + (r2 + 128 * x1), tmp116 & xmask, eviction_policy='evict_last', other=0.0)
    tmp118 = tmp0 >= tmp80
    tmp119 = tmp0 < tmp113
    tmp120 = tmp118 & tmp119
    tmp121 = tl.load(in_ptr20 + (r2 + 128 * x1), tmp120 & xmask, eviction_policy='evict_last', other=0.0)
    tmp122 = tl.where(tmp120, tmp121, tmp101)
    tmp123 = tl.where(tmp116, tmp117, tmp122)
    tmp124 = tl.where(tmp111, tmp112, tmp123)
    tmp125 = tl.where(tmp106, tmp107, tmp124)
    tmp126 = tl.full([1, 1], 24, tl.int64)
    tmp127 = tmp0 >= tmp126
    tmp128 = tl.full([1, 1], 25, tl.int64)
    tmp129 = tmp0 < tmp128
    tmp130 = tmp127 & tmp129
    tmp131 = tl.load(in_ptr21 + (r2 + 128 * x1), tmp130 & xmask, eviction_policy='evict_last', other=0.0)
    tmp132 = tl.full([1, 1], 23, tl.int64)
    tmp133 = tmp0 >= tmp132
    tmp134 = tmp0 < tmp126
    tmp135 = tmp133 & tmp134
    tmp136 = tl.load(in_ptr22 + (r2 + 128 * x1), tmp135 & xmask, eviction_policy='evict_last', other=0.0)
    tmp137 = tl.full([1, 1], 22, tl.int64)
    tmp138 = tmp0 >= tmp137
    tmp139 = tmp0 < tmp132
    tmp140 = tmp138 & tmp139
    tmp141 = tl.load(in_ptr23 + (r2 + 128 * x1), tmp140 & xmask, eviction_policy='evict_last', other=0.0)
    tmp142 = tmp0 >= tmp104
    tmp143 = tmp0 < tmp137
    tmp144 = tmp142 & tmp143
    tmp145 = tl.load(in_ptr24 + (r2 + 128 * x1), tmp144 & xmask, eviction_policy='evict_last', other=0.0)
    tmp146 = tl.where(tmp144, tmp145, tmp125)
    tmp147 = tl.where(tmp140, tmp141, tmp146)
    tmp148 = tl.where(tmp135, tmp136, tmp147)
    tmp149 = tl.where(tmp130, tmp131, tmp148)
    tmp150 = tl.full([1, 1], 28, tl.int64)
    tmp151 = tmp0 >= tmp150
    tmp152 = tl.full([1, 1], 29, tl.int64)
    tmp153 = tmp0 < tmp152
    tmp154 = tmp151 & tmp153
    tmp155 = tl.load(in_ptr25 + (r2 + 128 * x1), tmp154 & xmask, eviction_policy='evict_last', other=0.0)
    tmp156 = tl.full([1, 1], 27, tl.int64)
    tmp157 = tmp0 >= tmp156
    tmp158 = tmp0 < tmp150
    tmp159 = tmp157 & tmp158
    tmp160 = tl.load(in_ptr26 + (r2 + 128 * x1), tmp159 & xmask, eviction_policy='evict_last', other=0.0)
    tmp161 = tl.full([1, 1], 26, tl.int64)
    tmp162 = tmp0 >= tmp161
    tmp163 = tmp0 < tmp156
    tmp164 = tmp162 & tmp163
    tmp165 = tl.load(in_ptr27 + (r2 + 128 * x1), tmp164 & xmask, eviction_policy='evict_last', other=0.0)
    tmp166 = tmp0 >= tmp128
    tmp167 = tmp0 < tmp161
    tmp168 = tmp166 & tmp167
    tmp169 = tl.load(in_ptr28 + (r2 + 128 * x1), tmp168 & xmask, eviction_policy='evict_last', other=0.0)
    tmp170 = tl.where(tmp168, tmp169, tmp149)
    tmp171 = tl.where(tmp164, tmp165, tmp170)
    tmp172 = tl.where(tmp159, tmp160, tmp171)
    tmp173 = tl.where(tmp154, tmp155, tmp172)
    tmp174 = tl.full([1, 1], 32, tl.int64)
    tmp175 = tmp0 >= tmp174
    tmp176 = tl.full([1, 1], 33, tl.int64)
    tmp177 = tmp0 < tmp176
    tmp178 = tmp175 & tmp177
    tmp179 = tl.load(in_ptr29 + (r2 + 128 * x1), tmp178 & xmask, eviction_policy='evict_last', other=0.0)
    tmp180 = tl.full([1, 1], 31, tl.int64)
    tmp181 = tmp0 >= tmp180
    tmp182 = tmp0 < tmp174
    tmp183 = tmp181 & tmp182
    tmp184 = tl.load(in_ptr30 + (r2 + 128 * x1), tmp183 & xmask, eviction_policy='evict_last', other=0.0)
    tmp185 = tl.full([1, 1], 30, tl.int64)
    tmp186 = tmp0 >= tmp185
    tmp187 = tmp0 < tmp180
    tmp188 = tmp186 & tmp187
    tmp189 = tl.load(in_ptr31 + (r2 + 128 * x1), tmp188 & xmask, eviction_policy='evict_last', other=0.0)
    tmp190 = tmp0 >= tmp152
    tmp191 = tmp0 < tmp185
    tmp192 = tmp190 & tmp191
    tmp193 = tl.load(in_ptr32 + (r2 + 128 * x1), tmp192 & xmask, eviction_policy='evict_last', other=0.0)
    tmp194 = tl.where(tmp192, tmp193, tmp173)
    tmp195 = tl.where(tmp188, tmp189, tmp194)
    tmp196 = tl.where(tmp183, tmp184, tmp195)
    tmp197 = tl.where(tmp178, tmp179, tmp196)
    tmp198 = tl.full([1, 1], 36, tl.int64)
    tmp199 = tmp0 >= tmp198
    tmp200 = tl.full([1, 1], 37, tl.int64)
    tmp201 = tmp0 < tmp200
    tmp202 = tmp199 & tmp201
    tmp203 = tl.load(in_ptr33 + (r2 + 128 * x1), tmp202 & xmask, eviction_policy='evict_last', other=0.0)
    tmp204 = tl.full([1, 1], 35, tl.int64)
    tmp205 = tmp0 >= tmp204
    tmp206 = tmp0 < tmp198
    tmp207 = tmp205 & tmp206
    tmp208 = tl.load(in_ptr34 + (r2 + 128 * x1), tmp207 & xmask, eviction_policy='evict_last', other=0.0)
    tmp209 = tl.full([1, 1], 34, tl.int64)
    tmp210 = tmp0 >= tmp209
    tmp211 = tmp0 < tmp204
    tmp212 = tmp210 & tmp211
    tmp213 = tl.load(in_ptr35 + (r2 + 128 * x1), tmp212 & xmask, eviction_policy='evict_last', other=0.0)
    tmp214 = tmp0 >= tmp176
    tmp215 = tmp0 < tmp209
    tmp216 = tmp214 & tmp215
    tmp217 = tl.load(in_ptr36 + (r2 + 128 * x1), tmp216 & xmask, eviction_policy='evict_last', other=0.0)
    tmp218 = tl.where(tmp216, tmp217, tmp197)
    tmp219 = tl.where(tmp212, tmp213, tmp218)
    tmp220 = tl.where(tmp207, tmp208, tmp219)
    tmp221 = tl.where(tmp202, tmp203, tmp220)
    tmp222 = tl.full([1, 1], 40, tl.int64)
    tmp223 = tmp0 >= tmp222
    tmp224 = tl.full([1, 1], 41, tl.int64)
    tmp225 = tmp0 < tmp224
    tmp226 = tmp223 & tmp225
    tmp227 = tl.load(in_ptr37 + (r2 + 128 * x1), tmp226 & xmask, eviction_policy='evict_last', other=0.0)
    tmp228 = tl.full([1, 1], 39, tl.int64)
    tmp229 = tmp0 >= tmp228
    tmp230 = tmp0 < tmp222
    tmp231 = tmp229 & tmp230
    tmp232 = tl.load(in_ptr38 + (r2 + 128 * x1), tmp231 & xmask, eviction_policy='evict_last', other=0.0)
    tmp233 = tl.full([1, 1], 38, tl.int64)
    tmp234 = tmp0 >= tmp233
    tmp235 = tmp0 < tmp228
    tmp236 = tmp234 & tmp235
    tmp237 = tl.load(in_ptr39 + (r2 + 128 * x1), tmp236 & xmask, eviction_policy='evict_last', other=0.0)
    tmp238 = tmp0 >= tmp200
    tmp239 = tmp0 < tmp233
    tmp240 = tmp238 & tmp239
    tmp241 = tl.load(in_ptr40 + (r2 + 128 * x1), tmp240 & xmask, eviction_policy='evict_last', other=0.0)
    tmp242 = tl.where(tmp240, tmp241, tmp221)
    tmp243 = tl.where(tmp236, tmp237, tmp242)
    tmp244 = tl.where(tmp231, tmp232, tmp243)
    tmp245 = tl.where(tmp226, tmp227, tmp244)
    tmp246 = tl.full([1, 1], 44, tl.int64)
    tmp247 = tmp0 >= tmp246
    tmp248 = tl.full([1, 1], 45, tl.int64)
    tmp249 = tmp0 < tmp248
    tmp250 = tmp247 & tmp249
    tmp251 = tl.load(in_ptr41 + (r2 + 128 * x1), tmp250 & xmask, eviction_policy='evict_last', other=0.0)
    tmp252 = tl.full([1, 1], 43, tl.int64)
    tmp253 = tmp0 >= tmp252
    tmp254 = tmp0 < tmp246
    tmp255 = tmp253 & tmp254
    tmp256 = tl.load(in_ptr42 + (r2 + 128 * x1), tmp255 & xmask, eviction_policy='evict_last', other=0.0)
    tmp257 = tl.full([1, 1], 42, tl.int64)
    tmp258 = tmp0 >= tmp257
    tmp259 = tmp0 < tmp252
    tmp260 = tmp258 & tmp259
    tmp261 = tl.load(in_ptr43 + (r2 + 128 * x1), tmp260 & xmask, eviction_policy='evict_last', other=0.0)
    tmp262 = tmp0 >= tmp224
    tmp263 = tmp0 < tmp257
    tmp264 = tmp262 & tmp263
    tmp265 = tl.load(in_ptr44 + (r2 + 128 * x1), tmp264 & xmask, eviction_policy='evict_last', other=0.0)
    tmp266 = tl.where(tmp264, tmp265, tmp245)
    tmp267 = tl.where(tmp260, tmp261, tmp266)
    tmp268 = tl.where(tmp255, tmp256, tmp267)
    tmp269 = tl.where(tmp250, tmp251, tmp268)
    tmp270 = tl.full([1, 1], 48, tl.int64)
    tmp271 = tmp0 >= tmp270
    tmp272 = tl.full([1, 1], 49, tl.int64)
    tmp273 = tmp0 < tmp272
    tmp274 = tmp271 & tmp273
    tmp275 = tl.load(in_ptr45 + (r2 + 128 * x1), tmp274 & xmask, eviction_policy='evict_last', other=0.0)
    tmp276 = tl.full([1, 1], 47, tl.int64)
    tmp277 = tmp0 >= tmp276
    tmp278 = tmp0 < tmp270
    tmp279 = tmp277 & tmp278
    tmp280 = tl.load(in_ptr46 + (r2 + 128 * x1), tmp279 & xmask, eviction_policy='evict_last', other=0.0)
    tmp281 = tl.full([1, 1], 46, tl.int64)
    tmp282 = tmp0 >= tmp281
    tmp283 = tmp0 < tmp276
    tmp284 = tmp282 & tmp283
    tmp285 = tl.load(in_ptr47 + (r2 + 128 * x1), tmp284 & xmask, eviction_policy='evict_last', other=0.0)
    tmp286 = tmp0 >= tmp248
    tmp287 = tmp0 < tmp281
    tmp288 = tmp286 & tmp287
    tmp289 = tl.load(in_ptr48 + (r2 + 128 * x1), tmp288 & xmask, eviction_policy='evict_last', other=0.0)
    tmp290 = tl.where(tmp288, tmp289, tmp269)
    tmp291 = tl.where(tmp284, tmp285, tmp290)
    tmp292 = tl.where(tmp279, tmp280, tmp291)
    tmp293 = tl.where(tmp274, tmp275, tmp292)
    tmp294 = tl.full([1, 1], 52, tl.int64)
    tmp295 = tmp0 >= tmp294
    tmp296 = tl.full([1, 1], 53, tl.int64)
    tmp297 = tmp0 < tmp296
    tmp298 = tmp295 & tmp297
    tmp299 = tl.load(in_ptr49 + (r2 + 128 * x1), tmp298 & xmask, eviction_policy='evict_last', other=0.0)
    tmp300 = tl.full([1, 1], 51, tl.int64)
    tmp301 = tmp0 >= tmp300
    tmp302 = tmp0 < tmp294
    tmp303 = tmp301 & tmp302
    tmp304 = tl.load(in_ptr50 + (r2 + 128 * x1), tmp303 & xmask, eviction_policy='evict_last', other=0.0)
    tmp305 = tl.full([1, 1], 50, tl.int64)
    tmp306 = tmp0 >= tmp305
    tmp307 = tmp0 < tmp300
    tmp308 = tmp306 & tmp307
    tmp309 = tl.load(in_ptr51 + (r2 + 128 * x1), tmp308 & xmask, eviction_policy='evict_last', other=0.0)
    tmp310 = tmp0 >= tmp272
    tmp311 = tmp0 < tmp305
    tmp312 = tmp310 & tmp311
    tmp313 = tl.load(in_ptr52 + (r2 + 128 * x1), tmp312 & xmask, eviction_policy='evict_last', other=0.0)
    tmp314 = tl.where(tmp312, tmp313, tmp293)
    tmp315 = tl.where(tmp308, tmp309, tmp314)
    tmp316 = tl.where(tmp303, tmp304, tmp315)
    tmp317 = tl.where(tmp298, tmp299, tmp316)
    tmp318 = tl.full([1, 1], 56, tl.int64)
    tmp319 = tmp0 >= tmp318
    tmp320 = tl.full([1, 1], 57, tl.int64)
    tmp321 = tmp0 < tmp320
    tmp322 = tmp319 & tmp321
    tmp323 = tl.load(in_ptr53 + (r2 + 128 * x1), tmp322 & xmask, eviction_policy='evict_last', other=0.0)
    tmp324 = tl.full([1, 1], 55, tl.int64)
    tmp325 = tmp0 >= tmp324
    tmp326 = tmp0 < tmp318
    tmp327 = tmp325 & tmp326
    tmp328 = tl.load(in_ptr54 + (r2 + 128 * x1), tmp327 & xmask, eviction_policy='evict_last', other=0.0)
    tmp329 = tl.full([1, 1], 54, tl.int64)
    tmp330 = tmp0 >= tmp329
    tmp331 = tmp0 < tmp324
    tmp332 = tmp330 & tmp331
    tmp333 = tl.load(in_ptr55 + (r2 + 128 * x1), tmp332 & xmask, eviction_policy='evict_last', other=0.0)
    tmp334 = tmp0 >= tmp296
    tmp335 = tmp0 < tmp329
    tmp336 = tmp334 & tmp335
    tmp337 = tl.load(in_ptr56 + (r2 + 128 * x1), tmp336 & xmask, eviction_policy='evict_last', other=0.0)
    tmp338 = tl.where(tmp336, tmp337, tmp317)
    tmp339 = tl.where(tmp332, tmp333, tmp338)
    tmp340 = tl.where(tmp327, tmp328, tmp339)
    tmp341 = tl.where(tmp322, tmp323, tmp340)
    tmp342 = tl.full([1, 1], 60, tl.int64)
    tmp343 = tmp0 >= tmp342
    tmp344 = tl.full([1, 1], 61, tl.int64)
    tmp345 = tmp0 < tmp344
    tmp346 = tmp343 & tmp345
    tmp347 = tl.load(in_ptr57 + (r2 + 128 * x1), tmp346 & xmask, eviction_policy='evict_last', other=0.0)
    tmp348 = tl.full([1, 1], 59, tl.int64)
    tmp349 = tmp0 >= tmp348
    tmp350 = tmp0 < tmp342
    tmp351 = tmp349 & tmp350
    tmp352 = tl.load(in_ptr58 + (r2 + 128 * x1), tmp351 & xmask, eviction_policy='evict_last', other=0.0)
    tmp353 = tl.full([1, 1], 58, tl.int64)
    tmp354 = tmp0 >= tmp353
    tmp355 = tmp0 < tmp348
    tmp356 = tmp354 & tmp355
    tmp357 = tl.load(in_ptr59 + (r2 + 128 * x1), tmp356 & xmask, eviction_policy='evict_last', other=0.0)
    tmp358 = tmp0 >= tmp320
    tmp359 = tmp0 < tmp353
    tmp360 = tmp358 & tmp359
    tmp361 = tl.load(in_ptr60 + (r2 + 128 * x1), tmp360 & xmask, eviction_policy='evict_last', other=0.0)
    tmp362 = tl.where(tmp360, tmp361, tmp341)
    tmp363 = tl.where(tmp356, tmp357, tmp362)
    tmp364 = tl.where(tmp351, tmp352, tmp363)
    tmp365 = tl.where(tmp346, tmp347, tmp364)
    tmp366 = tl.full([1, 1], 63, tl.int64)
    tmp367 = tmp0 >= tmp366
    tmp368 = tl.load(in_ptr61 + (r2 + 128 * x1), tmp367 & xmask, eviction_policy='evict_last', other=0.0)
    tmp369 = tl.full([1, 1], 62, tl.int64)
    tmp370 = tmp0 >= tmp369
    tmp371 = tmp0 < tmp366
    tmp372 = tmp370 & tmp371
    tmp373 = tl.load(in_ptr62 + (r2 + 128 * x1), tmp372 & xmask, eviction_policy='evict_last', other=0.0)
    tmp374 = tmp0 >= tmp344
    tmp375 = tmp0 < tmp369
    tmp376 = tmp374 & tmp375
    tmp377 = tl.load(in_ptr63 + (r2 + 128 * x1), tmp376 & xmask, eviction_policy='evict_last', other=0.0)
    tmp378 = tl.where(tmp376, tmp377, tmp365)
    tmp379 = tl.where(tmp372, tmp373, tmp378)
    tmp380 = tl.where(tmp367, tmp368, tmp379)
    tmp381 = tmp380 * tmp380
    tmp382 = tl.broadcast_to(tmp381, [XBLOCK, RBLOCK])
    tmp384 = tl.where(xmask, tmp382, 0)
    tmp385 = tl.sum(tmp384, 1)[:, None]
    tmp386 = libdevice.sqrt(tmp385)
    tl.store(in_out_ptr0 + (r2 + 128 * x3), tmp380, xmask)
    tl.debug_barrier()
    tl.store(in_out_ptr1 + x3, tmp386, xmask)
(r2 + 128 * x1), tmp332 & xmask, eviction_policy='evict_last', other=0.0) tmp334 = tmp0 >= tmp296 tmp335 = tmp0 < tmp329 tmp336 = tmp334 & tmp335 tmp337 = tl.load(in_ptr56 + (r2 + 128 * x1), tmp336 & xmask, eviction_policy='evict_last', other=0.0) tmp338 = tl.where(tmp336, tmp337, tmp317) tmp339 = tl.where(tmp332, tmp333, tmp338) tmp340 = tl.where(tmp327, tmp328, tmp339) tmp341 = tl.where(tmp322, tmp323, tmp340) tmp342 = tl.full([1, 1], 60, tl.int64) tmp343 = tmp0 >= tmp342 tmp344 = tl.full([1, 1], 61, tl.int64) tmp345 = tmp0 < tmp344 tmp346 = tmp343 & tmp345 tmp347 = tl.load(in_ptr57 + (r2 + 128 * x1), tmp346 & xmask, eviction_policy='evict_last', other=0.0) tmp348 = tl.full([1, 1], 59, tl.int64) tmp349 = tmp0 >= tmp348 tmp350 = tmp0 < tmp342 tmp351 = tmp349 & tmp350 tmp352 = tl.load(in_ptr58 + (r2 + 128 * x1), tmp351 & xmask, eviction_policy='evict_last', other=0.0) tmp353 = tl.full([1, 1], 58, tl.int64) tmp354 = tmp0 >= tmp353 tmp355 = tmp0 < tmp348 tmp356 = tmp354 & tmp355 tmp357 = tl.load(in_ptr59 + (r2 + 128 * x1), tmp356 & xmask, eviction_policy='evict_last', other=0.0) tmp358 = tmp0 >= tmp320 tmp359 = tmp0 < tmp353 tmp360 = tmp358 & tmp359 tmp361 = tl.load(in_ptr60 + (r2 + 128 * x1), tmp360 & xmask, eviction_policy='evict_last', other=0.0) tmp362 = tl.where(tmp360, tmp361, tmp341) tmp363 = tl.where(tmp356, tmp357, tmp362) tmp364 = tl.where(tmp351, tmp352, tmp363) tmp365 = tl.where(tmp346, tmp347, tmp364) tmp366 = tl.full([1, 1], 63, tl.int64) tmp367 = tmp0 >= tmp366 tmp368 = tl.load(in_ptr61 + (r2 + 128 * x1), tmp367 & xmask, eviction_policy='evict_last', other=0.0) tmp369 = tl.full([1, 1], 62, tl.int64) tmp370 = tmp0 >= tmp369 tmp371 = tmp0 < tmp366 tmp372 = tmp370 & tmp371 tmp373 = tl.load(in_ptr62 + (r2 + 128 * x1), tmp372 & xmask, eviction_policy='evict_last', other=0.0) tmp374 = tmp0 >= tmp344 tmp375 = tmp0 < tmp369 tmp376 = tmp374 & tmp375 tmp377 = tl.load(in_ptr63 + (r2 + 128 * x1), tmp376 & xmask, eviction_policy='evict_last', other=0.0) tmp378 = tl.where(tmp376, tmp377, tmp365) tmp379 = tl.where(tmp372, tmp373, tmp378) tmp380 = tl.where(tmp367, tmp368, tmp379) tmp381 = tmp380 * tmp380 tmp382 = tl.broadcast_to(tmp381, [XBLOCK, RBLOCK]) tmp384 = tl.where(xmask, tmp382, 0) tmp385 = tl.sum(tmp384, 1)[:, None] tmp386 = libdevice.sqrt(tmp385) tl.store(in_out_ptr0 + (r2 + 128 * x3), tmp380, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x3, tmp386, xmask) @triton.jit def triton_red_fused_div_linalg_vector_norm_7(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr): xnumel = 4 rnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex _tmp7 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp0 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + (64 * x0 + r1 // 128), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp2 = 1e-12 tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 / tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = _tmp7 + tmp6 _tmp7 = tl.where(rmask & xmask, tmp8, _tmp7) tmp7 = tl.sum(_tmp7, 1)[:, None] tmp9 = libdevice.sqrt(tmp7) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp9, xmask) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex tmp10 = 
tl.load(in_ptr0 + (r1 + 8192 * x0), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp11 = tl.load(in_ptr1 + (64 * x0 + r1 // 128), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp12 = 1e-12 tmp13 = triton_helpers.maximum(tmp11, tmp12) tmp14 = tmp10 / tmp13 tmp15 = triton_helpers.maximum(tmp9, tmp12) tmp16 = tmp14 / tmp15 tl.store(out_ptr0 + (r1 + 8192 * x0), tmp16, rmask & xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 128, 64, 64), (524288, 4096, 64, 1)) assert_size_stride(primals_2, (64, 128, 1, 1), (128, 1, 1, 1)) assert_size_stride(primals_3, (64, 128), (128, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32) get_raw_stream(0) triton_red_fused_linalg_vector_norm_0[grid(16384)](primals_1, buf0, 16384, 128, XBLOCK=64, RBLOCK=8, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32) buf6 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf8 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf10 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf12 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf15 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf17 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf19 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf21 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf24 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf26 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf28 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf30 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf33 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf35 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf37 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf39 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf42 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf44 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf46 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf48 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf51 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf53 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf55 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf57 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf60 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf62 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf64 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf66 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf69 = empty_strided_cuda((4, 1, 128, 
4096), (524288, 2097152, 4096, 1), torch.float32) buf71 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf73 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf75 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf78 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf80 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf82 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf84 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf87 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf89 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf91 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf93 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf96 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf98 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf100 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf102 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf105 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf107 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf109 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf111 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf114 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf116 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf118 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf120 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf123 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf125 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf127 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf129 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf132 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf134 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf136 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf138 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf141 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf143 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) buf145 = empty_strided_cuda((4, 1, 128, 4096), (524288, 2097152, 4096, 1), torch.float32) triton_poi_fused_div_sub_1[grid(2097152)](primals_1, buf0, primals_3, buf1, buf6, buf8, buf10, buf12, buf15, buf17, buf19, buf21, buf24, buf26, buf28, buf30, buf33, buf35, buf37, buf39, buf42, buf44, buf46, buf48, buf51, buf53, buf55, buf57, buf60, buf62, buf64, buf66, buf69, buf71, buf73, buf75, buf78, buf80, buf82, buf84, buf87, buf89, buf91, buf93, buf96, buf98, buf100, buf102, buf105, buf107, buf109, buf111, buf114, buf116, buf118, buf120, buf123, 
buf125, buf127, buf129, buf132, buf134, buf136, buf138, buf141, buf143, buf145, 2097152, XBLOCK=512, num_warps= 8, num_stages=1) del primals_1 buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf3 = reinterpret_tensor(buf0, (4, 1, 4096), (4096, 4096, 1), 0) del buf0 buf4 = empty_strided_cuda((4, 1, 4096), (4096, 4096, 1), torch.float32) triton_per_fused__softmax_2[grid(16384)](buf2, buf3, buf4, 16384, 64, XBLOCK=8, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf7 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf9 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf11 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf13 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf16 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf18 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf20 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf22 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf25 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf27 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf29 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf31 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf34 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf36 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf38 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf40 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf43 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf45 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf47 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf49 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf52 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf54 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf56 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf58 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf61 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf63 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf65 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf67 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) triton_red_fused_mul_sub_sum_3[grid(512)](buf1, primals_3, buf2, buf3, buf4, buf6, buf8, buf10, buf12, buf15, buf17, buf19, buf21, buf24, buf26, buf28, buf30, buf33, buf35, buf37, buf39, buf42, buf44, buf46, buf48, buf51, buf53, buf55, buf57, buf60, buf62, buf64, buf66, buf5, buf7, buf9, buf11, buf13, buf16, buf18, buf20, buf22, buf25, buf27, buf29, buf31, buf34, buf36, buf38, buf40, buf43, buf45, buf47, buf49, buf52, buf54, buf56, buf58, buf61, buf63, buf65, buf67, 512, 4096, XBLOCK=1, RBLOCK= 1024, num_warps=16, num_stages=1) buf70 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf72 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf74 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf76 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf79 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf81 = 
empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf83 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf85 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf88 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf90 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf92 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf94 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf97 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf99 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf101 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf103 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf106 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf108 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf110 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf112 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf115 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf117 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf119 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf121 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf124 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf126 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf128 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf130 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) triton_red_fused_mul_sum_4[grid(512)](buf69, buf2, buf3, buf4, buf71, buf73, buf75, buf78, buf80, buf82, buf84, buf87, buf89, buf91, buf93, buf96, buf98, buf100, buf102, buf105, buf107, buf109, buf111, buf114, buf116, buf118, buf120, buf123, buf125, buf127, buf129, buf70, buf72, buf74, buf76, buf79, buf81, buf83, buf85, buf88, buf90, buf92, buf94, buf97, buf99, buf101, buf103, buf106, buf108, buf110, buf112, buf115, buf117, buf119, buf121, buf124, buf126, buf128, buf130, 512, 4096, XBLOCK=1, RBLOCK= 1024, num_warps=16, num_stages=1) buf133 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf135 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf137 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf139 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf142 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf144 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) buf146 = empty_strided_cuda((4, 1, 128), (128, 512, 1), torch.float32) triton_red_fused_mul_sum_5[grid(512)](buf132, buf2, buf3, buf4, buf134, buf136, buf138, buf141, buf143, buf145, buf133, buf135, buf137, buf139, buf142, buf144, buf146, 512, 4096, XBLOCK=1, RBLOCK=1024, num_warps=16, num_stages=1) buf14 = empty_strided_cuda((4, 64, 128), (8192, 128, 1), torch.float32) buf23 = buf14 del buf14 buf32 = buf23 del buf23 buf41 = buf32 del buf32 buf50 = buf41 del buf41 buf59 = buf50 del buf50 buf68 = buf59 del buf59 buf77 = buf68 del buf68 buf86 = buf77 del buf77 buf95 = buf86 del buf86 buf104 = buf95 del buf95 buf113 = buf104 del buf104 buf122 = buf113 del buf113 buf131 = buf122 del buf122 buf140 = buf131 del buf131 buf147 = buf140 del buf140 buf148 = empty_strided_cuda((4, 64, 1), (64, 1, 256), torch.float32) buf149 = reinterpret_tensor(buf148, (4, 64, 1), (64, 1, 1), 0) del buf148 triton_per_fused_copy_linalg_vector_norm_zeros_6[grid(256)](buf147, buf149, 
buf13, buf11, buf9, buf7, buf5, buf22, buf20, buf18, buf16, buf31, buf29, buf27, buf25, buf40, buf38, buf36, buf34, buf49, buf47, buf45, buf43, buf58, buf56, buf54, buf52, buf67, buf65, buf63, buf61, buf76, buf74, buf72, buf70, buf85, buf83, buf81, buf79, buf94, buf92, buf90, buf88, buf103, buf101, buf99, buf97, buf112, buf110, buf108, buf106, buf121, buf119, buf117, buf115, buf130, buf128, buf126, buf124, buf139, buf137, buf135, buf133, buf146, buf144, buf142, 256, 128, XBLOCK=1, num_warps=2, num_stages=1) del buf101 del buf103 del buf106 del buf108 del buf11 del buf110 del buf112 del buf115 del buf117 del buf119 del buf121 del buf124 del buf126 del buf128 del buf13 del buf130 del buf133 del buf135 del buf137 del buf139 del buf142 del buf144 del buf146 del buf16 del buf18 del buf20 del buf22 del buf25 del buf27 del buf29 del buf31 del buf34 del buf36 del buf38 del buf40 del buf43 del buf45 del buf47 del buf49 del buf5 del buf52 del buf54 del buf56 del buf58 del buf61 del buf63 del buf65 del buf67 del buf7 del buf70 del buf72 del buf74 del buf76 del buf79 del buf81 del buf83 del buf85 del buf88 del buf9 del buf90 del buf92 del buf94 del buf97 del buf99 buf150 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf151 = reinterpret_tensor(buf150, (4, 1), (1, 1), 0) del buf150 buf152 = empty_strided_cuda((4, 8192), (8192, 1), torch.float32) triton_red_fused_div_linalg_vector_norm_7[grid(4)](buf151, buf147, buf149, buf152, 4, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1) return (buf152, primals_2, buf1, buf2, buf3, buf4, reinterpret_tensor( primals_3, (1, 128), (128, 1), 0), buf6, buf8, buf10, buf12, buf15, buf17, buf19, buf21, buf24, buf26, buf28, buf30, buf33, buf35, buf37, buf39, buf42, buf44, buf46, buf48, buf51, buf53, buf55, buf57, buf60, buf62, buf64, buf66, buf69, buf71, buf73, buf75, buf78, buf80, buf82, buf84, buf87, buf89, buf91, buf93, buf96, buf98, buf100, buf102, buf105, buf107, buf109, buf111, buf114, buf116, buf118, buf120, buf123, buf125, buf127, buf129, buf132, buf134, buf136, buf138, buf141, buf143, buf145, buf147, buf149, buf151) class NetVLADNew(nn.Module): """NetVLAD layer implementation""" def __init__(self, num_clusters=64, dim=128, normalize_input=True, vladv2=False, use_faiss=True): """ Args: num_clusters : int The number of clusters dim : int Dimension of descriptors normalize_input : bool If true, descriptor-wise L2 normalization is applied to input. 
            vladv2 : bool
                If true, use vladv2 otherwise use vladv1
        """
        super().__init__()
        self.num_clusters = num_clusters
        self.dim = dim
        self.alpha = 0
        self.vladv2 = vladv2
        self.normalize_input = normalize_input
        self.conv = nn.Conv2d(dim, num_clusters, kernel_size=(1, 1),
                              bias=vladv2)
        self.centroids = nn.Parameter(torch.rand(num_clusters, dim))
        self.use_faiss = use_faiss

    def init_params(self, clsts, traindescs):
        if not self.vladv2:
            clstsAssign = clsts / np.linalg.norm(clsts, axis=1, keepdims=True)
            dots = np.dot(clstsAssign, traindescs.T)
            dots.sort(0)
            dots = dots[::-1, :]
            self.alpha = (-np.log(0.01) /
                          np.mean(dots[0, :] - dots[1, :])).item()
            self.centroids = nn.Parameter(torch.from_numpy(clsts))
            self.conv.weight = nn.Parameter(torch.from_numpy(
                self.alpha * clstsAssign).unsqueeze(2).unsqueeze(3))
            self.conv.bias = None
        else:
            if not self.use_faiss:
                knn = NearestNeighbors(n_jobs=-1)
                knn.fit(traindescs)
                del traindescs
                ds_sq = np.square(knn.kneighbors(clsts, 2)[1])
                del knn
            else:
                index = faiss.IndexFlatL2(traindescs.shape[1])
                index.add(traindescs)
                del traindescs
                ds_sq = np.square(index.search(clsts, 2)[1])
                del index
            self.alpha = (-np.log(0.01) /
                          np.mean(ds_sq[:, 1] - ds_sq[:, 0])).item()
            self.centroids = nn.Parameter(torch.from_numpy(clsts))
            del clsts, ds_sq
            self.conv.weight = nn.Parameter((2.0 * self.alpha *
                self.centroids).unsqueeze(-1).unsqueeze(-1))
            self.conv.bias = nn.Parameter(
                -self.alpha * self.centroids.norm(dim=1))

    def forward(self, input_0):
        primals_3 = self.centroids
        primals_2 = self.conv.weight
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
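# --- Editor's note (illustrative, not part of the generated module) ---
# For readability, here is a minimal eager-mode sketch of the computation the
# fused kernels above implement: the standard NetVLAD forward pass (descriptor
# L2-normalization, soft assignment via the 1x1 conv, residual aggregation
# against the centroids, per-cluster intra-normalization, then a global L2
# norm over the flattened 64 * 128 = 8192-dim descriptor seen in
# triton_red_fused_div_linalg_vector_norm_7). Function and variable names are
# hypothetical; this is a reference sketch, not the repository's exact code.
import torch
import torch.nn.functional as F


def netvlad_forward_reference(x, conv, centroids, normalize_input=True):
    # x: (N, D, H, W) local descriptors; conv: nn.Conv2d(D, K, 1);
    # centroids: (K, D)
    N, D = x.shape[:2]
    K = centroids.shape[0]
    if normalize_input:
        x = F.normalize(x, p=2, dim=1)  # descriptor-wise L2 norm (eps=1e-12)
    soft_assign = F.softmax(conv(x).view(N, K, -1), dim=1)  # (N, K, H*W)
    x_flat = x.view(N, D, -1)  # (N, D, H*W)
    vlad = torch.zeros(N, K, D, device=x.device, dtype=x.dtype)
    for k in range(K):  # assignment-weighted residuals to each centroid
        residual = x_flat - centroids[k].view(1, D, 1)
        vlad[:, k] = (residual * soft_assign[:, k:k + 1]).sum(dim=-1)
    vlad = F.normalize(vlad, p=2, dim=2)  # intra-normalization per cluster
    return F.normalize(vlad.view(N, -1), p=2, dim=1)  # final 8192-dim output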
leochien1110/Patch-NetVLAD
NetVLAD
false
7351
[ "MIT" ]
1
9282217dd2c9bcf0446a05400fd277e651cecf4e
https://github.com/leochien1110/Patch-NetVLAD/tree/9282217dd2c9bcf0446a05400fd277e651cecf4e
Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/cs/ccsxpe36istx6656so5auwsbxqfwu2rgcc33zs6h4cp3jau7o7bt.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 7680 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), 
xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + (x0), tmp2, xmask) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/2s/c2s2pec3vduo4xn2kfu53hypzbhir2ql56lmvazfmymhwv2ehhv5.py # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # sigmoid => sigmoid # Graph fragment: # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {}) triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.sigmoid(tmp0) tl.store(in_out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (120, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (1, 120), (120, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 120), (120, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 120), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 120), (1920, 480, 120, 1), 0); del buf0 # reuse buf4 = empty_strided_cuda((4, 4, 4, 120), (1920, 480, 120, 1), torch.bool) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, buf4, 7680, grid=grid(7680), stream=stream0) buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mm] 
extern_kernels.mm(reinterpret_tensor(buf1, (64, 120), (120, 1), 0), reinterpret_tensor(primals_3, (120, 1), (1, 120), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_1.run(buf3, 64, grid=grid(64), stream=stream0) return (buf3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 120), (120, 1), 0), buf3, primals_3, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((120, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, 120), (120, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
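# --- Editor's note (illustrative) ---
# `triton_poi_fused_relu_threshold_backward_0` above fuses the forward ReLU
# with the bookkeeping autograd needs for the backward pass: it writes
# relu(x) back in place and also stores the boolean mask relu(x) <= 0, which
# `threshold_backward` later uses to zero gradients. A rough eager-mode
# equivalent (hypothetical helper name):
import torch


def relu_with_backward_mask(x: torch.Tensor):
    out = torch.relu(x)  # forward activation, written in place by the kernel
    mask = out <= 0      # torch.bool mask saved for the backward pass
    return out, mask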
import torch
import torch.nn.functional as F
from torch import nn
import torch.utils.data


class Net(nn.Module):

    def __init__(self, in_dim):
        super().__init__()
        self.fc1 = nn.Linear(in_dim, 120, bias=False)
        nn.init.normal_(self.fc1.weight, mean=0, std=1)
        self.fc2 = nn.Linear(120, 1, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return self.sigmoid(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 7680
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp3 = 0.0
    tmp4 = tmp2 <= tmp3
    tl.store(in_out_ptr0 + x0, tmp2, xmask)
    tl.store(out_ptr0 + x0, tmp4, xmask)


@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.sigmoid(tmp0)
    tl.store(in_out_ptr0 + x0, tmp1, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (120, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (1, 120), (120, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 120), (120, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 120), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 120), (1920, 480, 120, 1), 0)
        del buf0
        buf4 = empty_strided_cuda((4, 4, 4, 120), (1920, 480, 120, 1),
            torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(7680)](buf1, buf4,
            7680, XBLOCK=256, num_warps=4, num_stages=1)
        buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 120), (120, 1), 0),
            reinterpret_tensor(primals_3, (120, 1), (1, 120), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf2
        triton_poi_fused_sigmoid_1[grid(64)](buf3, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
    return (buf3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
            reinterpret_tensor(buf1, (64, 120), (120, 1), 0), buf3,
            primals_3, buf4)


class NetNew(nn.Module):

    def __init__(self, in_dim):
        super().__init__()
        self.fc1 = nn.Linear(in_dim, 120, bias=False)
        nn.init.normal_(self.fc1.weight, mean=0, std=1)
        self.fc2 = nn.Linear(120, 1, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_3 = self.fc2.weight
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
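# --- Editor's note (illustrative) ---
# A minimal smoke test comparing the eager `Net` with the compiled `NetNew`
# defined above. This assumes a CUDA device is available (the generated
# `call` is CUDA-only) and that both modules share identical weights; the
# tolerance is a reasonable guess, not a guarantee from the source.
import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    eager = Net(in_dim=4).cuda()
    compiled = NetNew(in_dim=4).cuda()
    compiled.load_state_dict(eager.state_dict())  # share the same weights
    x = torch.rand(4, 4, 4, 4, device='cuda')
    with torch.no_grad():
        assert torch.allclose(eager(x), compiled(x), atol=1e-6)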
nmichlo/msc-research
Net
false
7352
[ "MIT" ]
1
625e57eca77bbfbc4728ccebdb0733e1613bd258
https://github.com/nmichlo/msc-research/tree/625e57eca77bbfbc4728ccebdb0733e1613bd258
StdConv3d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/vh/cvhrhefawu2fnqeyntrkw3rabqvznfyfimavjdorrjgo6emmscqt.py # Topologically Sorted Source Nodes: [var_mean, sub, add, sqrt, w], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] # Source node to ATen node mapping: # add => add # sqrt => sqrt # sub => sub # var_mean => var_mean # w => div # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1, 2, 3, 4]), kwargs = {correction: 0, keepdim: True}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {}) triton_per_fused_add_div_sqrt_sub_var_mean_0 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_sqrt_sub_var_mean_0(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel): xnumel = 4 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (256*x0)), None) tmp1 = tl.broadcast_to(tmp0, [RBLOCK]) tmp3 = tl.broadcast_to(tmp1, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.full([1], 256, tl.int32) tmp7 = tmp6.to(tl.float32) tmp8 = tmp5 / tmp7 tmp9 = tmp1 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tl.broadcast_to(tmp10, [RBLOCK]) tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0)) tmp14 = 256.0 tmp15 = tmp13 / tmp14 tmp16 = 1e-05 tmp17 = tmp15 + tmp16 tmp18 = libdevice.sqrt(tmp17) tmp19 = tmp0 - tmp8 tmp20 = tmp19 / tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp18, None) tl.store(out_ptr1 + (r1 + (256*x0)), tmp20, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/sr/csrhsrdic5iyy4yxrz5ceuduwhllrnbrs3ear4tfocob4nmhl4ke.py # Topologically Sorted Source Nodes: [conv3d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv3d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %div, %primals_2, [1, 1, 1], [0, 0, 0], [1, 1, 1], False, [0, 0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 
= tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 1, 1, 1, 1), (1, 4, 4, 4, 4), torch.float32) buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1, 1), (1, 1, 1, 1, 1), 0); del buf1 # reuse buf4 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [var_mean, sub, add, sqrt, w], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div] stream0 = get_raw_stream(0) triton_per_fused_add_div_sqrt_sub_var_mean_0.run(buf3, primals_1, buf4, 4, 256, grid=grid(4), stream=stream0) # Topologically Sorted Source Nodes: [conv3d], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf4, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf5, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1)) buf6 = reinterpret_tensor(buf5, (1, 4, 1, 1, 1), (4, 1, 4, 4, 4), 0); del buf5 # reuse # Topologically Sorted Source Nodes: [conv3d], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf6, primals_2, 4, grid=grid(4), stream=stream0) del primals_2 return (reinterpret_tensor(buf6, (4, 1, 1, 1), (1, 1, 1, 1), 0), primals_1, buf3, buf4, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn
import torch.jit
import torch.nn.functional as F
import torch.nn.functional


class StdConv3d(nn.Conv3d):

    def forward(self, x):
        w = self.weight
        v, m = torch.var_mean(w, dim=[1, 2, 3, 4], keepdim=True,
                              unbiased=False)
        w = (w - m) / torch.sqrt(v + 1e-05)
        return F.conv3d(x, w, self.bias, self.stride, self.padding,
                        self.dilation, self.groups)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.jit
import torch.nn.functional

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_0(in_out_ptr0, in_ptr0,
        out_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None)
    tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
    tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
    tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
    tmp6 = tl.full([1], 256, tl.int32)
    tmp7 = tmp6.to(tl.float32)
    tmp8 = tmp5 / tmp7
    tmp9 = tmp1 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
    tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
    tmp14 = 256.0
    tmp15 = tmp13 / tmp14
    tmp16 = 1e-05
    tmp17 = tmp15 + tmp16
    tmp18 = libdevice.sqrt(tmp17)
    tmp19 = tmp0 - tmp8
    tmp20 = tmp19 / tmp18
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp18, None)
    tl.store(out_ptr1 + (r1 + 256 * x0), tmp20, None)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask)
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x0, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((4, 1, 1, 1, 1), (1, 4, 4, 4, 4),
            torch.float32)
        buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1, 1), (1, 1, 1, 1, 1), 0)
        del buf1
        buf4 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
            torch.float32)
        get_raw_stream(0)
        triton_per_fused_add_div_sqrt_sub_var_mean_0[grid(4)](buf3,
            primals_1, buf4, 4, 256, num_warps=2, num_stages=1)
        buf5 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
            4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf4, stride=(1, 1, 1),
            padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False,
            output_padding=(0, 0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (1, 4, 1, 1, 1), (4, 1, 1, 1, 1))
        buf6 = reinterpret_tensor(buf5, (1, 4, 1, 1, 1), (4, 1, 4, 4, 4), 0)
        del buf5
        triton_poi_fused_convolution_1[grid(4)](buf6, primals_2, 4,
            XBLOCK=4, num_warps=1, num_stages=1)
        del primals_2
    return (reinterpret_tensor(buf6, (4, 1, 1, 1), (1, 1, 1, 1), 0),
            primals_1, buf3, buf4,
            reinterpret_tensor(primals_3, (1, 4, 4, 4, 4),
                               (256, 64, 16, 4, 1), 0))


class StdConv3dNew(nn.Conv3d):

    def forward(self, input_0):
        primals_1 = self.weight
        primals_2 = self.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
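# --- Editor's note (illustrative) ---
# Likewise for the weight-standardized conv: the fused kernel computes the
# biased per-filter mean/variance and standardizes w to (w - m) / sqrt(v + 1e-5)
# before the convolution. A smoke test sketch, assuming CUDA and a PyTorch
# version where F.conv3d accepts unbatched (C, D, H, W) input (the generated
# `call` reinterprets the (4, 4, 4, 4) sample as a batch of one):
import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    eager = StdConv3d(in_channels=4, out_channels=4, kernel_size=4).cuda()
    compiled = StdConv3dNew(in_channels=4, out_channels=4, kernel_size=4).cuda()
    compiled.load_state_dict(eager.state_dict())  # identical weight and bias
    x = torch.rand(4, 4, 4, 4, device='cuda')
    with torch.no_grad():
        assert torch.allclose(eager(x), compiled(x), atol=1e-5)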
nntrongnghia/TDSI21-Shoulder-Muscle-Segmentation
StdConv3d
false
7353
[ "Apache-2.0" ]
1
29f0f83d93e4fdd8127261283dcf9242d9914ba6
https://github.com/nntrongnghia/TDSI21-Shoulder-Muscle-Segmentation/tree/29f0f83d93e4fdd8127261283dcf9242d9914ba6
Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/xk/cxkj3do6aay5nizivli3fgy5pptooqabtyndstansxhjtou3jbmn.py # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # relu => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 254016 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 15876) % 4 x0 = xindex % 15876 x4 = (xindex // 15876) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + (15904*x4)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/du/cduge76mnjneamibk4jsi6ii4doxwgrwyfeq2fzytqfqf7a5zi5o.py # Topologically Sorted Source Nodes: [X], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # X => getitem, getitem_1 # Graph fragment: # %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 63504 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 63 x1 = (xindex // 63) % 63 x2 = (xindex // 3969) x3 = xindex % 3969 tmp0 = tl.load(in_ptr0 + ((2*x0) + (252*x1) + (15904*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (252*x1) + (15904*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (126 + (2*x0) + (252*x1) + (15904*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (127 + (2*x0) + (252*x1) + (15904*x2)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > 
tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3 + (4000*x2)), tmp6, xmask) tl.store(out_ptr1 + (x3 + (4096*x2)), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/sa/csadke7kw6t6ekznz65r7azao7tz6nwhfwdnjwuka76t4arkrfpz.py # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # relu_1 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[524288], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 476288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3721) % 32 x0 = xindex % 3721 x4 = (xindex // 3721) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + (3744*x4)), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/wq/cwq5fnnqdr3ppjkhho5ypi5zgiudfipuvctfbnkk2txoz4vdy7ju.py # Topologically Sorted Source Nodes: [X_2], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # X_2 => getitem_2, getitem_3 # Graph fragment: # %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {}) # %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_3 = 
async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 115200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 30 x1 = (xindex // 30) % 30 x2 = (xindex // 900) x3 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (122*x1) + (3744*x2)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (122*x1) + (3744*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (61 + (2*x0) + (122*x1) + (3744*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (62 + (2*x0) + (122*x1) + (3744*x2)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3), tmp6, xmask) tl.store(out_ptr1 + (x3), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/yt/cyteh5nkeip2gobwmta4k2qqbxqzk333dqicv43jffxywz55c2po.py # Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # relu_2 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {}) triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from 
torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 200704 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 784) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/2v/c2vkrvdrexw5lsxbic3axyiyqiu4cht7j5djoa7nquwryylur75r.py # Topologically Sorted Source Nodes: [X_4], Original ATen: [aten.max_pool2d_with_indices] # Source node to ATen node mapping: # X_4 => _low_memory_max_pool2d_with_offsets_2, getitem_5 # Graph fragment: # %_low_memory_max_pool2d_with_offsets_2 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_2, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {}) # %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 
'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x1 = (xindex // 14) x2 = xindex tmp0 = tl.load(in_ptr0 + ((2*x0) + (56*x1)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (56*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (28 + (2*x0) + (56*x1)), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (29 + (2*x0) + (56*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2), tmp15, xmask) tl.store(out_ptr1 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/as/casttxcqvmcl7y2tuxsw6xlbvg3tcr5iy5zwkaobtv4t4vi4zaj4.py # Topologically Sorted Source Nodes: [X_7], Original ATen: [aten.relu] # Source node to ATen node mapping: # X_7 => relu_3 # Graph fragment: # %add_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_3, %primals_9), kwargs = {}) # %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_3,), kwargs = {}) triton_poi_fused_relu_6 = async_compile.triton('triton_poi_fused_relu_6', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/nc/cncufi5fwljjqzodj5xs4ful6p33khupoyd6zror7uetj5egismy.py # Topologically Sorted Source Nodes: [X_9], Original ATen: [aten.relu] # Source node to ATen node mapping: # X_9 => relu_4 # Graph fragment: # %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_11), kwargs = {}) # %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {}) triton_poi_fused_relu_7 = async_compile.triton('triton_poi_fused_relu_7', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/x5/cx5pihzzf3a6eymyu3ib45lia74ugyzdsxkzmca3dxgwtf2oyqhq.py # Topologically Sorted Source Nodes: [X_10], Original ATen: [aten.relu] # Source node to ATen node mapping: # X_10 => relu_5 # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_13), kwargs = {}) # %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {}) 
triton_poi_fused_relu_8 = async_compile.triton('triton_poi_fused_relu_8', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ba/cba7kvoxo7qpbbk2gjpjeyvgmm7l4ziibhwvbsfg7bmcmxhpyppd.py # Topologically Sorted Source Nodes: [X_11], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # X_11 => sigmoid # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_15), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_sigmoid_9 = async_compile.triton('triton_poi_fused_sigmoid_9', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 
'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args args.clear() assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 1, 128, 128), (16384, 16384, 128, 1)) assert_size_stride(primals_4, (32, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (64, ), (1, )) assert_size_stride(primals_8, (64, 12544), (12544, 1)) assert_size_stride(primals_9, (64, ), (1, )) assert_size_stride(primals_10, (32, 64), (64, 1)) assert_size_stride(primals_11, (32, ), (1, )) assert_size_stride(primals_12, (16, 32), (32, 1)) assert_size_stride(primals_13, (16, ), (1, )) assert_size_stride(primals_14, (2, 16), (16, 1)) assert_size_stride(primals_15, (2, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 126, 126), (63504, 15876, 126, 1)) buf1 = empty_strided_cuda((4, 4, 126, 126), (63616, 15904, 126, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 254016, grid=grid(254016), stream=stream0) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 4, 63, 63), (16000, 4000, 63, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 63, 63), (16384, 4096, 63, 1), torch.int8) # Topologically Sorted Source Nodes: [X], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 63504, grid=grid(63504), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 61, 61), (119072, 3721, 61, 1)) buf5 = empty_strided_cuda((4, 32, 61, 61), (119808, 3744, 61, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf4, primals_5, buf5, 
476288, grid=grid(476288), stream=stream0) del buf4 del primals_5 buf6 = empty_strided_cuda((4, 32, 30, 30), (28800, 900, 30, 1), torch.float32) buf7 = empty_strided_cuda((4, 32, 30, 30), (28800, 900, 30, 1), torch.int8) # Topologically Sorted Source Nodes: [X_2], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 115200, grid=grid(115200), stream=stream0) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 28, 28), (50176, 784, 28, 1)) buf9 = buf8; del buf8 # reuse # Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_4.run(buf9, primals_7, 200704, grid=grid(200704), stream=stream0) del primals_7 buf10 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1), torch.int8) buf11 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1), torch.float32) # Topologically Sorted Source Nodes: [X_4], Original ATen: [aten.max_pool2d_with_indices] triton_poi_fused_max_pool2d_with_indices_5.run(buf9, buf10, buf11, 50176, grid=grid(50176), stream=stream0) buf12 = empty_strided_cuda((4, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf11, (4, 12544), (12544, 1), 0), reinterpret_tensor(primals_8, (12544, 64), (1, 12544), 0), out=buf12) buf13 = buf12; del buf12 # reuse # Topologically Sorted Source Nodes: [X_7], Original ATen: [aten.relu] triton_poi_fused_relu_6.run(buf13, primals_9, 256, grid=grid(256), stream=stream0) del primals_9 buf14 = empty_strided_cuda((4, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf13, reinterpret_tensor(primals_10, (64, 32), (1, 64), 0), out=buf14) buf15 = buf14; del buf14 # reuse # Topologically Sorted Source Nodes: [X_9], Original ATen: [aten.relu] triton_poi_fused_relu_7.run(buf15, primals_11, 128, grid=grid(128), stream=stream0) del primals_11 buf16 = empty_strided_cuda((4, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf15, reinterpret_tensor(primals_12, (32, 16), (1, 32), 0), out=buf16) buf17 = buf16; del buf16 # reuse # Topologically Sorted Source Nodes: [X_10], Original ATen: [aten.relu] triton_poi_fused_relu_8.run(buf17, primals_13, 64, grid=grid(64), stream=stream0) del primals_13 buf18 = empty_strided_cuda((4, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf17, reinterpret_tensor(primals_14, (16, 2), (1, 16), 0), out=buf18) buf19 = buf18; del buf18 # reuse # Topologically Sorted Source Nodes: [X_11], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_9.run(buf19, primals_15, 8, grid=grid(8), stream=stream0) del primals_15 return (buf19, primals_1, primals_3, primals_4, primals_6, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, reinterpret_tensor(buf11, (4, 12544), (12544, 1), 0), buf13, buf15, buf17, buf19, primals_14, primals_12, primals_10, primals_8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), 
device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 128, 128), (16384, 16384, 128, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((64, 12544), (12544, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((32, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((16, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((2, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
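The generated file above is self-contained and executable; `benchmark_compiled_module` builds random strided inputs matching the asserted shapes and times `call`. A minimal driver, assuming the dump is saved under the hypothetical name `net_compiled.py` on a CUDA machine with matching PyTorch/Triton versions:

# Hypothetical driver for the Inductor dump above; the module name
# "net_compiled" is an assumption, not part of the original file.
from net_compiled import benchmark_compiled_module

# Builds rand_strided inputs with the asserted shapes and times call().
benchmark_compiled_module(times=10, repeat=10)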
import torch
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 4, (3, 3), 1)
        self.dropout2d_1 = nn.Dropout2d(p=0.5)
        self.conv2 = nn.Conv2d(4, 32, (3, 3), 1)
        self.dropout2d_2 = nn.Dropout2d(p=0.5)
        self.conv3 = nn.Conv2d(32, 64, (3, 3), 1)
        random_sample = torch.randn((1, 1, 128, 128))
        self.flattened_number = self.get_flattened_number(random_sample)
        self.dropout1 = nn.Dropout(p=0.5)
        self.fc1 = nn.Linear(self.flattened_number, 64)
        self.dropout2 = nn.Dropout(p=0.5)
        self.fc2 = nn.Linear(64, 32)
        self.fc3 = nn.Linear(32, 16)
        self.fc4 = nn.Linear(16, 2)

    def get_flattened_number(self, X):
        X = F.max_pool2d(torch.relu(self.conv1(X)), (2, 2))
        X = F.max_pool2d(torch.relu(self.conv2(X)), (2, 2))
        X = F.max_pool2d(torch.relu(self.conv3(X)), (2, 2))
        return X.shape[1] * X.shape[2] * X.shape[3]

    def forward(self, X):
        X = F.max_pool2d(torch.relu(self.conv1(X)), (2, 2))
        X = self.dropout2d_1(X)
        X = F.max_pool2d(torch.relu(self.conv2(X)), (2, 2))
        X = self.dropout2d_2(X)
        X = F.max_pool2d(torch.relu(self.conv3(X)), (2, 2))
        X = X.view(-1, self.flattened_number)
        X = self.dropout1(X)
        X = torch.relu(self.fc1(X))
        X = self.dropout2(X)
        X = torch.relu(self.fc2(X))
        X = torch.relu(self.fc3(X))
        X = torch.sigmoid(self.fc4(X))
        return X


def get_inputs():
    return [torch.rand([4, 1, 128, 128])]


def get_init_inputs():
    return [[], {}]
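For orientation, a quick eager-mode smoke test of the module above (an added sketch, not part of the original row): the four 1x128x128 inputs from `get_inputs` should map to four 2-way sigmoid outputs.

import torch

# Smoke test; Net and get_inputs are assumed to come from the block above.
torch.manual_seed(0)
model = Net()
model.eval()  # disable Dropout/Dropout2d for a deterministic check
with torch.no_grad():
    batch, = get_inputs()        # shape (4, 1, 128, 128)
    out = model(batch)
assert out.shape == (4, 2)       # conv/pool stack flattens to 64*14*14 = 12544
assert ((out >= 0) & (out <= 1)).all()  # sigmoid output range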
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 254016 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 15876 % 4 x0 = xindex % 15876 x4 = xindex // 15876 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 15904 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 63504 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 63 x1 = xindex // 63 % 63 x2 = xindex // 3969 x3 = xindex % 3969 tmp0 = tl.load(in_ptr0 + (2 * x0 + 252 * x1 + 15904 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 252 * x1 + 15904 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (126 + 2 * x0 + 252 * x1 + 15904 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (127 + 2 * x0 + 252 * x1 + 15904 * x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + (x3 + 4000 * x2), tmp6, xmask) tl.store(out_ptr1 + (x3 + 4096 * x2), tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 476288 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3721 % 32 x0 = xindex % 3721 x4 = xindex // 3721 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 3744 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 115200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 30 x1 = xindex // 30 % 30 x2 = xindex // 900 x3 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 122 * x1 + 3744 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 122 * x1 + 3744 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (61 + 2 * x0 + 122 * x1 + 3744 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (62 + 2 * x0 + 122 * x1 + 3744 * 
x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, xmask) tl.store(out_ptr1 + x3, tmp16, xmask) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 784 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x1 = xindex // 14 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x1), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x2, tmp15, xmask) tl.store(out_ptr1 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, 
tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_sigmoid_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 1, 128, 128), (16384, 16384, 128, 1)) assert_size_stride(primals_4, (32, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 12544), (12544, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (32, 64), (64, 1)) assert_size_stride(primals_11, (32,), (1,)) assert_size_stride(primals_12, (16, 32), (32, 1)) assert_size_stride(primals_13, (16,), (1,)) assert_size_stride(primals_14, (2, 16), (16, 1)) assert_size_stride(primals_15, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 126, 126), (63504, 15876, 126, 1)) buf1 = empty_strided_cuda((4, 4, 126, 126), (63616, 15904, 126, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(254016)](buf0, primals_2, buf1, 254016, XBLOCK=1024, num_warps=4, num_stages=1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 4, 63, 63), (16000, 4000, 63, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 63, 63), (16384, 4096, 63, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_1[grid(63504)](buf1, buf2, buf3, 63504, XBLOCK=256, num_warps=4, num_stages=1) buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 32, 61, 61), (119072, 3721, 61, 1)) buf5 = empty_strided_cuda((4, 32, 61, 61), (119808, 3744, 61, 1), torch.float32) triton_poi_fused_convolution_relu_2[grid(476288)](buf4, primals_5, buf5, 476288, XBLOCK=1024, num_warps=4, num_stages=1) del buf4 del primals_5 buf6 = empty_strided_cuda((4, 32, 30, 30), (28800, 900, 30, 1), torch.float32) buf7 = empty_strided_cuda((4, 32, 30, 30), (28800, 900, 30, 1), torch.int8) triton_poi_fused_max_pool2d_with_indices_3[grid(115200)](buf5, buf6, buf7, 115200, XBLOCK=512, num_warps=8, num_stages=1) buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 28, 28), (50176, 784, 28, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_4[grid(200704)](buf9, primals_7, 200704, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf10 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1), torch.int8) buf11 = empty_strided_cuda((4, 64, 14, 14), (12544, 196, 14, 1), torch.float32) 
triton_poi_fused_max_pool2d_with_indices_5[grid(50176)](buf9, buf10, buf11, 50176, XBLOCK=256, num_warps=4, num_stages=1) buf12 = empty_strided_cuda((4, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf11, (4, 12544), (12544, 1), 0), reinterpret_tensor(primals_8, (12544, 64), (1, 12544), 0), out=buf12) buf13 = buf12 del buf12 triton_poi_fused_relu_6[grid(256)](buf13, primals_9, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_9 buf14 = empty_strided_cuda((4, 32), (32, 1), torch.float32) extern_kernels.mm(buf13, reinterpret_tensor(primals_10, (64, 32), ( 1, 64), 0), out=buf14) buf15 = buf14 del buf14 triton_poi_fused_relu_7[grid(128)](buf15, primals_11, 128, XBLOCK= 128, num_warps=4, num_stages=1) del primals_11 buf16 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.mm(buf15, reinterpret_tensor(primals_12, (32, 16), ( 1, 32), 0), out=buf16) buf17 = buf16 del buf16 triton_poi_fused_relu_8[grid(64)](buf17, primals_13, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_13 buf18 = empty_strided_cuda((4, 2), (2, 1), torch.float32) extern_kernels.mm(buf17, reinterpret_tensor(primals_14, (16, 2), (1, 16), 0), out=buf18) buf19 = buf18 del buf18 triton_poi_fused_sigmoid_9[grid(8)](buf19, primals_15, 8, XBLOCK=8, num_warps=1, num_stages=1) del primals_15 return (buf19, primals_1, primals_3, primals_4, primals_6, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, reinterpret_tensor(buf11, (4, 12544), (12544, 1), 0), buf13, buf15, buf17, buf19, primals_14, primals_12, primals_10, primals_8) class NetNew(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1, 4, (3, 3), 1) self.dropout2d_1 = nn.Dropout2d(p=0.5) self.conv2 = nn.Conv2d(4, 32, (3, 3), 1) self.dropout2d_2 = nn.Dropout2d(p=0.5) self.conv3 = nn.Conv2d(32, 64, (3, 3), 1) random_sample = torch.randn((1, 1, 128, 128)) self.flattened_number = self.get_flattened_number(random_sample) self.dropout1 = nn.Dropout(p=0.5) self.fc1 = nn.Linear(self.flattened_number, 64) self.dropout2 = nn.Dropout(p=0.5) self.fc2 = nn.Linear(64, 32) self.fc3 = nn.Linear(32, 16) self.fc4 = nn.Linear(16, 2) def get_flattened_number(self, X): X = F.max_pool2d(torch.relu(self.conv1(X)), (2, 2)) X = F.max_pool2d(torch.relu(self.conv2(X)), (2, 2)) X = F.max_pool2d(torch.relu(self.conv3(X)), (2, 2)) return X.shape[1] * X.shape[2] * X.shape[3] def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.fc1.weight primals_9 = self.fc1.bias primals_10 = self.fc2.weight primals_11 = self.fc2.bias primals_12 = self.fc3.weight primals_13 = self.fc3.bias primals_14 = self.fc4.weight primals_15 = self.fc4.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0]
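One detail worth noting about the compiled wrapper above: the `call` graph contains no dropout kernels, so `NetNew` corresponds to the network with dropout inactive. A parity check against the eager `Net` therefore only makes sense in `eval()` mode and after copying weights across. A minimal sketch under those assumptions (CUDA required; the fused kernels may reorder floating-point ops, hence the tolerances):

import torch

# Hypothetical parity check; Net and NetNew come from the blocks above.
torch.manual_seed(0)
eager = Net().cuda().eval()
fused = NetNew().cuda().eval()
fused.load_state_dict(eager.state_dict())  # identical weights in both modules

x = torch.rand(4, 1, 128, 128, device='cuda')
with torch.no_grad():
    ref = eager(x)
    out = fused(x)
torch.testing.assert_close(out, ref, rtol=1e-4, atol=1e-4)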
nathantau/BigBrain
Net
false
7,354
[ "MIT" ]
1
b9e81ee3ca91fadeccd59043dcc0062af1e6d365
https://github.com/nathantau/BigBrain/tree/b9e81ee3ca91fadeccd59043dcc0062af1e6d365
FF
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/l6/cl6rx6mcsl2icpctgrn3wungskzdczgzxd3oqw5wii5i7b62bpi6.py # Topologically Sorted Source Nodes: [conv2d_1, img], Original ATen: [aten.convolution, aten.add] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # img => add # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_6), kwargs = {}) triton_poi_fused_add_convolution_0 = async_compile.triton('triton_poi_fused_add_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr1 + (x0), None) tmp3 = tmp0 + tmp2 tmp5 = tmp3 + tmp4 tl.store(in_out_ptr0 + (x0), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/pz/cpzsqynpi6opzpmdjpncx75oeqm32hxmus42g2fp5sdmoifg7ruz.py # Topologically Sorted Source Nodes: [x1, conv2d_2, x2, x1_1, x1_2], Original ATen: [aten.convolution, aten.sigmoid, aten.mul, aten.add] # Source node to ATen node mapping: # conv2d_2 => convolution_2 # x1 => convolution # x1_1 => mul # x1_2 => add_1 # x2 => sigmoid # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%add, %primals_7, %primals_8, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_2,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %sigmoid), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {}) triton_poi_fused_add_convolution_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_add_convolution_mul_sigmoid_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[65536], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mul_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_mul_sigmoid_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 65536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 4096) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp3 = 
tl.load(in_out_ptr1 + (x3), None) tmp4 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + (x3), None) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tl.sigmoid(tmp5) tmp7 = tmp2 * tmp6 tmp9 = tmp7 + tmp8 tl.store(in_out_ptr0 + (x3), tmp2, None) tl.store(in_out_ptr1 + (x3), tmp5, None) tl.store(out_ptr0 + (x3), tmp9, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (1, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (1, ), (1, )) assert_size_stride(primals_6, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_7, (4, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_8, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x1], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 64, 64), (16384, 4096, 64, 1)) # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d_1, img], Original ATen: [aten.convolution, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_convolution_0.run(buf3, primals_5, primals_6, 16384, grid=grid(16384), stream=stream0) del primals_5 del primals_6 # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 64, 64), (16384, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse buf5 = buf4; del buf4 # reuse buf6 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32) # Topologically Sorted Source Nodes: [x1, conv2d_2, x2, x1_1, x1_2], Original ATen: [aten.convolution, aten.sigmoid, aten.mul, aten.add] triton_poi_fused_add_convolution_mul_sigmoid_1.run(buf1, buf5, primals_2, primals_8, primals_3, buf6, 65536, grid=grid(65536), stream=stream0) del primals_2 del primals_8 return (buf6, buf3, primals_1, primals_3, primals_4, primals_7, buf1, buf3, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 1, 3, 3), (9, 9, 3, 1), 
device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
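A note on the return value of `call` above: only the first two tensors (`buf6` and `buf3`, i.e. `x1` and `img`) are the module's outputs; the remaining entries are inputs and intermediates stashed for the backward pass of this '0_forward' graph, which is why `FFNew.forward` further down returns `output[0], output[1]`.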
import torch
import torch.nn as nn


def conv(in_channels, out_channels, kernel_size, bias=False, stride=1):
    return nn.Conv2d(in_channels, out_channels, kernel_size, padding=
        kernel_size // 2, bias=bias, stride=stride)


class FF(nn.Module):

    def __init__(self, n_feat, kernel_size=3, bias=True):
        super(FF, self).__init__()
        self.conv1 = conv(n_feat, n_feat, kernel_size, bias=bias)
        self.conv2 = conv(n_feat, 1, kernel_size, bias=bias)
        self.conv3 = conv(1, n_feat, kernel_size, bias=bias)

    def forward(self, x, x_img):
        x1 = self.conv1(x)
        img = self.conv2(x) + x_img
        x2 = torch.sigmoid(self.conv3(img))
        x1 = x1 * x2
        x1 = x1 + x
        return x1, img


def get_inputs():
    return [torch.rand([4, 4, 64, 64]), torch.rand([4, 1, 64, 64])]


def get_init_inputs():
    return [[], {'n_feat': 4}]
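As with the previous entry, a small added usage sketch: `FF` gates the `conv1` features with a sigmoid mask derived from the predicted image and adds a residual connection, so both output shapes match the corresponding inputs.

import torch

# Smoke test; FF and get_inputs are assumed to come from the block above.
torch.manual_seed(0)
ff = FF(n_feat=4)
x, x_img = get_inputs()          # (4, 4, 64, 64) and (4, 1, 64, 64)
x1, img = ff(x, x_img)
assert x1.shape == x.shape       # residual branch preserves the feature shape
assert img.shape == x_img.shape  # single-channel image estimate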
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr1 + x0, None) tmp3 = tmp0 + tmp2 tmp5 = tmp3 + tmp4 tl.store(in_out_ptr0 + x0, tmp5, None) @triton.jit def triton_poi_fused_add_convolution_mul_sigmoid_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 4 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + x3, None) tmp4 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp8 = tl.load(in_ptr2 + x3, None) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tl.sigmoid(tmp5) tmp7 = tmp2 * tmp6 tmp9 = tmp7 + tmp8 tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(in_out_ptr1 + x3, tmp5, None) tl.store(out_ptr0 + x3, tmp9, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (1, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (1,), (1,)) assert_size_stride(primals_6, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_7, (4, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_8, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 64, 64), (16384, 4096, 64, 1)) buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf3 = buf2 del buf2 get_raw_stream(0) triton_poi_fused_add_convolution_0[grid(16384)](buf3, primals_5, primals_6, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 del primals_6 buf4 = extern_kernels.convolution(buf3, primals_7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 64, 64), (16384, 4096, 64, 1)) buf1 = buf0 del buf0 buf5 = buf4 del buf4 buf6 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32) triton_poi_fused_add_convolution_mul_sigmoid_1[grid(65536)](buf1, buf5, primals_2, primals_8, primals_3, buf6, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 del primals_8 return (buf6, buf3, primals_1, primals_3, primals_4, primals_7, buf1, buf3, buf5) def conv(in_channels, out_channels, kernel_size, bias=False, 
stride=1): return nn.Conv2d(in_channels, out_channels, kernel_size, padding= kernel_size // 2, bias=bias, stride=stride) class FFNew(nn.Module): def __init__(self, n_feat, kernel_size=3, bias=True): super(FFNew, self).__init__() self.conv1 = conv(n_feat, n_feat, kernel_size, bias=bias) self.conv2 = conv(n_feat, 1, kernel_size, bias=bias) self.conv3 = conv(1, n_feat, kernel_size, bias=bias) def forward(self, input_0, input_1): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_7 = self.conv3.weight primals_8 = self.conv3.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0], output[1]
noxsine/WTSDNet
FF
false
7,355
[ "MIT" ]
1
7f25fb62c705c730c4d2fab6c86f9cf3535e6d80
https://github.com/noxsine/WTSDNet/tree/7f25fb62c705c730c4d2fab6c86f9cf3535e6d80
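A minimal parity sketch for the FF record above: run the eager FF module and the Inductor-generated FFNew wrapper on shared weights and compare outputs. This assumes a CUDA device (the generated call() path launches Triton kernels), that both class definitions from this record are in scope, and an illustrative tolerance.

import torch

eager = FF(n_feat=4).cuda()
fused = FFNew(n_feat=4).cuda()
fused.load_state_dict(eager.state_dict())  # identical weights on both paths

x = torch.rand(4, 4, 64, 64, device='cuda')
x_img = torch.rand(4, 1, 64, 64, device='cuda')
with torch.no_grad():
    out_ref, img_ref = eager(x, x_img)
    out_new, img_new = fused(x, x_img)
assert torch.allclose(out_ref, out_new, atol=1e-5)
assert torch.allclose(img_ref, img_new, atol=1e-5)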
PSNRLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/du/cdukfjxbk3gltkdefnds44xxup7qgipj5mczmse5kerplojh4icj.py # Topologically Sorted Source Nodes: [mse, truediv, log10, loss], Original ATen: [aten.mse_loss, aten.reciprocal, aten.mul, aten.log10] # Source node to ATen node mapping: # log10 => log10 # loss => mul_1 # mse => mean, pow_1, sub # truediv => mul, reciprocal # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {}) # %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%mean,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1.0), kwargs = {}) # %log10 : [num_users=1] = call_function[target=torch.ops.aten.log10.default](args = (%mul,), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%log10, 10), kwargs = {}) triton_per_fused_log10_mse_loss_mul_reciprocal_0 = async_compile.triton('triton_per_fused_log10_mse_loss_mul_reciprocal_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_log10_mse_loss_mul_reciprocal_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': 
True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_log10_mse_loss_mul_reciprocal_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = tl.full([1], 1, tl.int32) tmp10 = tmp9 / tmp8 tmp11 = 1.0 tmp12 = tmp10 * tmp11 tmp13 = libdevice.log10(tmp12) tmp14 = 10.0 tmp15 = tmp13 * tmp14 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp15, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [mse, truediv, log10, loss], Original ATen: [aten.mse_loss, aten.reciprocal, aten.mul, aten.log10] stream0 = get_raw_stream(0) triton_per_fused_log10_mse_loss_mul_reciprocal_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class PSNRLoss(nn.Module):

    def __init__(self):
        super(PSNRLoss, self).__init__()
        self.criterion = nn.MSELoss(reduction='mean')

    def __repr__(self):
        return 'PSNR'

    def forward(self, output, target):
        mse = self.criterion(output, target)
        loss = 10 * torch.log10(1.0 / mse)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_log10_mse_loss_mul_reciprocal_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tmp9 = tl.full([1], 1, tl.int32) tmp10 = tmp9 / tmp8 tmp11 = 1.0 tmp12 = tmp10 * tmp11 tmp13 = libdevice.log10(tmp12) tmp14 = 10.0 tmp15 = tmp13 * tmp14 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_log10_mse_loss_mul_reciprocal_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class PSNRLossNew(nn.Module): def __init__(self): super(PSNRLossNew, self).__init__() self.criterion = nn.MSELoss(size_average=True) def __repr__(self): return 'PSNR' def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
nthuy190991/geoseg
PSNRLoss
false
7,356
[ "MIT" ]
1
b679af5dc558720df36dddc7abfd4e6ecb46d7de
https://github.com/nthuy190991/geoseg/tree/b679af5dc558720df36dddc7abfd4e6ecb46d7de
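A quick CPU-side sanity check of the PSNR formula the fused kernel above implements, loss = 10 * log10(1 / MSE); pure PyTorch, nothing here depends on the generated code.

import torch

output = torch.rand(4, 4, 4, 4)
target = torch.rand(4, 4, 4, 4)
mse = torch.mean((output - target) ** 2)
loss = 10 * torch.log10(1.0 / mse)
# log10(1/x) == -log10(x), so the common -10 * log10(mse) form is equivalent
assert torch.allclose(loss, -10 * torch.log10(mse))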
CELoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/op/cop5hw7u777k4ep7os4kfm3ccj43nso2j632rkw5mpipgje4gdqj.py # Topologically Sorted Source Nodes: [argmax, loss], Original ATen: [aten.argmax, aten.nll_loss2d_forward] # Source node to ATen node mapping: # argmax => argmax # loss => convert_element_type, div, full_default_1, ne_1, ne_2, neg, sum_1, sum_2, where_1 # Graph fragment: # %argmax : [num_users=4] = call_function[target=torch.ops.aten.argmax.default](args = (%arg0_1, 1), kwargs = {}) # %ne_1 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%argmax, -100), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%squeeze,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%ne_1, %neg, %full_default_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%where_1,), kwargs = {}) # %ne_2 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%argmax, -100), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%ne_2,), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%sum_1, torch.float32), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, %convert_element_type), kwargs = {}) triton_per_fused_argmax_nll_loss2d_forward_0 = async_compile.triton('triton_per_fused_argmax_nll_loss2d_forward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 
'*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_argmax_nll_loss2d_forward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_argmax_nll_loss2d_forward_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = (rindex // 16) r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None) tmp1 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None) tmp17 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None) tmp32 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None) tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1, 1], 0, tl.int64) tmp11 = tl.full([1, 1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1, 1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1, 1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tmp45 = tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tmp47 = tl.full([1, 1], -100, tl.int64) tmp48 = tmp46 != tmp47 tmp49 = tl.where(tmp48, tmp46, tmp10) tmp50 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp51 = tmp49 + tmp50 tmp52 = tmp49 < 0 tmp53 = tl.where(tmp52, tmp51, tmp49) tl.device_assert((0 <= tmp53) & (tmp53 < 4), "index out of bounds: 0 <= tmp53 < 4") tmp55 = tl.load(in_ptr1 + (r0 + (16*tmp53) + (64*r1)), None) tmp56 = tl_math.log(tmp55) tmp57 = -tmp56 tmp58 = 0.0 tmp59 = tl.where(tmp48, tmp57, tmp58) tmp60 = tl.broadcast_to(tmp59, [XBLOCK, RBLOCK]) tmp62 = tl.sum(tmp60, 1)[:, None] tmp63 = tmp48.to(tl.int64) tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK]) tmp66 = tl.sum(tmp64, 1)[:, None] tmp67 = tmp66.to(tl.float32) tmp68 = tmp62 / tmp67 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp68, None) ''', device_str='cuda') async_compile.wait(globals()) del 
async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [argmax, loss], Original ATen: [aten.argmax, aten.nll_loss2d_forward] stream0 = get_raw_stream(0) triton_per_fused_argmax_nll_loss2d_forward_0.run(buf3, arg0_1, arg1_1, 1, 64, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn


class CELoss(nn.Module):

    def __init__(self):
        super(CELoss, self).__init__()
        self.criterionBinary = nn.BCELoss(reduction='mean')
        self.criterionMulti = nn.NLLLoss(reduction='mean')

    def __repr__(self):
        return 'CE'

    def forward(self, output, target):
        if target.shape[1] == 1:
            loss = self.criterionBinary(output, target)
        else:
            target = torch.argmax(target, dim=1).long()
            loss = self.criterionMulti(torch.log(output), target)
        return loss


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_argmax_nll_loss2d_forward_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp1 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp17 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp32 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1, 1], 0, tl.int64) tmp11 = tl.full([1, 1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1, 1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1, 1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tmp47 = tl.full([1, 1], -100, tl.int64) tmp48 = tmp46 != tmp47 tmp49 = tl.where(tmp48, tmp46, tmp10) tmp50 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp51 = tmp49 + tmp50 tmp52 = tmp49 < 0 tmp53 = tl.where(tmp52, tmp51, tmp49) tl.device_assert((0 <= tmp53) & (tmp53 < 4), 'index out of bounds: 0 <= tmp53 < 4') tmp55 = tl.load(in_ptr1 + (r0 + 16 * tmp53 + 64 * r1), None) tmp56 = tl_math.log(tmp55) tmp57 = -tmp56 tmp58 = 0.0 tmp59 = tl.where(tmp48, tmp57, tmp58) tmp60 = tl.broadcast_to(tmp59, [XBLOCK, RBLOCK]) tmp62 = tl.sum(tmp60, 1)[:, None] tmp63 = tmp48.to(tl.int64) tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK]) tmp66 = tl.sum(tmp64, 1)[:, None] tmp67 = tmp66.to(tl.float32) tmp68 = tmp62 / tmp67 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp68, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) buf3 = buf1 del buf1 get_raw_stream(0) triton_per_fused_argmax_nll_loss2d_forward_0[grid(1)](buf3, arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf3, class CELossNew(nn.Module): def __init__(self): super(CELossNew, self).__init__() self.criterionBinary = nn.BCELoss(size_average=True) self.criterionMulti = nn.NLLLoss(size_average=True) def __repr__(self): return 'CE' def 
forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
nthuy190991/geoseg
CELoss
false
7,357
[ "MIT" ]
1
b679af5dc558720df36dddc7abfd4e6ecb46d7de
https://github.com/nthuy190991/geoseg/tree/b679af5dc558720df36dddc7abfd4e6ecb46d7de
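The multi-class branch of CELoss reduces to NLL of log-probabilities indexed by the channel-argmax of the soft target, which is what the fused argmax/nll_loss2d kernel above computes in one pass. A small eager-mode equivalent, with shapes following get_inputs() and the assumption (as in the original) that output already holds probabilities:

import torch
import torch.nn.functional as F

output = torch.rand(4, 4, 4, 4)  # assumed per-channel probabilities
target = torch.rand(4, 4, 4, 4)  # soft targets; argmax recovers hard labels

labels = torch.argmax(target, dim=1)           # (4, 4, 4) integer class map
loss = F.nll_loss(torch.log(output), labels)   # mean over all pixels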
RatioModel
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/nc/cncwsucylpsg2zmlivjfxu6vbd64ztxjndlsix2ysjtby3xohgk4.py # Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh] # Source node to ATen node mapping: # tanh => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/7w/c7wxxkjeaevhuqpxtdaextqbr36zngmh2fhonoz4o3ulbg7gabtn.py # Topologically Sorted Source Nodes: [softplus], Original ATen: [aten.softplus] # Source node to ATen node mapping: # softplus => exp, gt, log1p, where # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%view_5,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_5, 20), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_5, %log1p), kwargs = {}) triton_poi_fused_softplus_1 = async_compile.triton('triton_poi_fused_softplus_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_softplus_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_softplus_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted 
Source Nodes: [tanh], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [tanh_1], Original ATen: [aten.tanh] triton_poi_fused_tanh_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0) del primals_5 buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_7 buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [softplus], Original ATen: [aten.softplus] triton_poi_fused_softplus_1.run(buf5, buf6, 64, grid=grid(64), stream=stream0) return (buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, buf5, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F


class RatioModel(torch.nn.Module):

    def __init__(self, D_in, hidden_unit_num):
        super().__init__()
        self.l1 = torch.nn.Linear(D_in, hidden_unit_num)
        self.l2 = torch.nn.Linear(hidden_unit_num, hidden_unit_num)
        self.l3 = torch.nn.Linear(hidden_unit_num, 1)

    def forward(self, X):
        return F.softplus(self.l3(torch.tanh(self.l2(torch.tanh(self.l1(X))))))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'D_in': 4, 'hidden_unit_num': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_softplus_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 20.0 tmp2 = tmp0 > tmp1 tmp3 = tl_math.exp(tmp0) tmp4 = libdevice.log1p(tmp3) tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_tanh_0[grid(256)](buf3, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf5) del primals_7 buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_softplus_1[grid(64)](buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf3, buf5, primals_6, primals_4 class RatioModelNew(torch.nn.Module): def __init__(self, D_in, hidden_unit_num): super().__init__() None self.l1 = torch.nn.Linear(D_in, hidden_unit_num) self.l2 = torch.nn.Linear(hidden_unit_num, hidden_unit_num) self.l3 = torch.nn.Linear(hidden_unit_num, 1) def forward(self, input_0): primals_1 = self.l1.weight primals_2 = self.l1.bias primals_4 = self.l2.weight primals_5 = self.l2.bias primals_6 = self.l3.weight primals_7 = 
self.l3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
numahha/wmopo
RatioModel
false
7,358
[ "MIT" ]
1
1557dab2e8168c1f2e53ffbc435b4000680f1d28
https://github.com/numahha/wmopo/tree/1557dab2e8168c1f2e53ffbc435b4000680f1d28
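triton_poi_fused_softplus_1 above is the numerically stable softplus: past a threshold (20) the function is effectively linear, so the kernel returns x unchanged and only evaluates log1p(exp(x)) below it. A small eager reference, assuming PyTorch's default beta=1:

import torch
import torch.nn.functional as F

def softplus_ref(x: torch.Tensor, threshold: float = 20.0) -> torch.Tensor:
    # Mirror the kernel: where(x > threshold, x, log1p(exp(x)))
    return torch.where(x > threshold, x, torch.log1p(torch.exp(x)))

x = torch.linspace(-30.0, 30.0, steps=61)
assert torch.allclose(softplus_ref(x), F.softplus(x))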
SchedulerTestNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ej/cejq4hvgtngsrt5ywcfdheadnaab2ydhtz352v7vusjumjik42f2.py # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv2d => convolution # relu => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tl.store(in_out_ptr0 + (x0), tmp5, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/4b/c4bm44vlkroihtm4ebt4iyykoeyhr2cwl6horqusip6sdixccmyf.py # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), None) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + (x0), tmp3, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (1, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_2, (1, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (1, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_5, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf1 = buf0; del buf0 # reuse # 
Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 16384, grid=grid(16384), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf3, primals_5, 16384, grid=grid(16384), stream=stream0) del primals_5 return (buf3, primals_1, primals_3, primals_4, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 1, 1, 1), (1, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch import nn as nn
from torch.nn import functional as F
from torch import optim as optim


class SchedulerTestNet(torch.nn.Module):
    """
    adapted from: https://github.com/pytorch/pytorch/blob/master/test/test_optim.py
    """

    def __init__(self):
        super(SchedulerTestNet, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 1, 1)
        self.conv2 = torch.nn.Conv2d(1, 1, 1)

    def forward(self, x):
        return self.conv2(F.relu(self.conv1(x)))


def get_inputs():
    return [torch.rand([4, 1, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn as nn from torch import optim as optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tl.store(in_out_ptr0 + x0, tmp5, None) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (1, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (1, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(16384)](buf1, primals_2, 16384, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(16384)](buf3, primals_5, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1 class SchedulerTestNetNew(torch.nn.Module): """ adapted from: https://github.com/pytorch/pytorch/blob/master/test/test_optim.py """ def __init__(self): super(SchedulerTestNetNew, self).__init__() self.conv1 = torch.nn.Conv2d(1, 1, 1) self.conv2 = torch.nn.Conv2d(1, 1, 1) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
oke-aditya/pytorch-lightning-bolts
SchedulerTestNet
false
7,359
[ "Apache-2.0" ]
1
268df20bb442e7385b709b1488d37fd2767aba3c
https://github.com/oke-aditya/pytorch-lightning-bolts/tree/268df20bb442e7385b709b1488d37fd2767aba3c
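SchedulerTestNet exists to exercise optimizers and LR schedulers rather than to model anything; a hypothetical training-loop sketch in that spirit (the SGD/StepLR choice and the hyperparameters are illustrative, not taken from the source):

import torch
from torch import optim

net = SchedulerTestNet()
opt = optim.SGD(net.parameters(), lr=0.05)
sched = optim.lr_scheduler.StepLR(opt, step_size=3, gamma=0.1)
for _ in range(5):
    loss = net(torch.rand(4, 1, 64, 64)).sum()  # toy scalar objective
    opt.zero_grad()
    loss.backward()
    opt.step()
    sched.step()  # advance the LR schedule once per epoch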
DynamicsModel
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/nc/cncwsucylpsg2zmlivjfxu6vbd64ztxjndlsix2ysjtby3xohgk4.py # Topologically Sorted Source Nodes: [tanh_1], Original ATen: [aten.tanh] # Source node to ATen node mapping: # tanh_1 => tanh_1 # Graph fragment: # %tanh_1 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/4i/c4irhy2t2rm4y7rjyqs3r3qwhxdgoivutbkznqbsbks6epvksfql.py # Topologically Sorted Source Nodes: [ones_like, mul], Original ATen: [aten.ones_like, aten.mul] # Source node to ATen node mapping: # mul => mul # ones_like => full_default # Graph fragment: # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_6, %full_default), kwargs = {}) triton_poi_fused_mul_ones_like_1 = async_compile.triton('triton_poi_fused_mul_ones_like_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_ones_like_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_ones_like_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [tanh_1], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del 
primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [ones_like, mul], Original ATen: [aten.ones_like, aten.mul] triton_poi_fused_mul_ones_like_1.run(primals_6, buf3, 256, grid=grid(256), stream=stream0) del primals_6 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch


class DynamicsModel(torch.nn.Module):

    def __init__(self, D_in, D_out, hidden_unit_num):
        super(DynamicsModel, self).__init__()
        self.l1 = torch.nn.Linear(D_in, hidden_unit_num)
        self.l2 = torch.nn.Linear(hidden_unit_num, D_out)
        self.logvar = torch.nn.Parameter(torch.zeros(D_out), requires_grad=True)

    def forward(self, X):
        # Compute the mean once instead of running l1/l2 twice, and broadcast
        # the learned log-variance to match it.
        mu = self.l2(torch.tanh(self.l1(X)))
        return mu, self.logvar * torch.ones_like(mu)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'D_in': 4, 'D_out': 4, 'hidden_unit_num': 4}]
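A minimal usage sketch (an addition, not part of the original record; shapes taken from get_inputs/get_init_inputs): the model returns a mean tensor and the learned log-variance broadcast to the same shape.

import torch

model = DynamicsModel(D_in=4, D_out=4, hidden_unit_num=4)
x = torch.rand([4, 4, 4, 4])
mu, logvar = model(x)
assert mu.shape == x.shape       # Linear layers act on the last dimension
assert logvar.shape == mu.shape  # logvar (shape [4]) broadcast via ones_like(mu)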
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Adds the first linear layer's bias and applies tanh in place.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)


@triton.jit
def triton_poi_fused_mul_ones_like_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Broadcasts the per-feature log-variance across the output shape
    # (the logvar * ones_like(mu) term of the forward).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_mul_ones_like_1[grid(256)](primals_6, buf3, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_6
    return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf3,
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, primals_4)


class DynamicsModelNew(torch.nn.Module):

    def __init__(self, D_in, D_out, hidden_unit_num):
        super(DynamicsModelNew, self).__init__()
        self.l1 = torch.nn.Linear(D_in, hidden_unit_num)
        self.l2 = torch.nn.Linear(hidden_unit_num, D_out)
        self.logvar = torch.nn.Parameter(torch.zeros(D_out), requires_grad=True)

    def forward(self, input_0):
        # Bind parameters in the order the compiled graph consumes them:
        # primals_2 is the bias added before tanh (l1.bias), primals_5 is the
        # addmm bias (l2.bias), and primals_6 feeds the ones_like multiply
        # (the log-variance).
        primals_1 = self.l1.weight
        primals_2 = self.l1.bias
        primals_4 = self.l2.weight
        primals_5 = self.l2.bias
        primals_6 = self.logvar
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6])
        return output[0], output[1]
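A hedged parity check (an addition, assuming CUDA is available and that DynamicsModel from the record above is importable alongside DynamicsModelNew): with the primals binding above, the compiled call() should reproduce the eager outputs.

import torch

if torch.cuda.is_available():
    eager = DynamicsModel(4, 4, 4).cuda()         # class from the record above
    compiled = DynamicsModelNew(4, 4, 4).cuda()
    compiled.load_state_dict(eager.state_dict())  # identical parameter names
    x = torch.rand([4, 4, 4, 4], device='cuda')
    mu_e, lv_e = eager(x)
    mu_c, lv_c = compiled(x)
    torch.testing.assert_close(mu_c, mu_e, rtol=1e-4, atol=1e-5)
    torch.testing.assert_close(lv_c, lv_e, rtol=1e-4, atol=1e-5)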
numahha/wmopo
DynamicsModel
false
7,360
[ "MIT" ]
1
1557dab2e8168c1f2e53ffbc435b4000680f1d28
https://github.com/numahha/wmopo/tree/1557dab2e8168c1f2e53ffbc435b4000680f1d28
MultipleInputModel
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/j5/cj57ctnl5nbogixdhvjwy5dw6jnyp7mihjsntksdftfpsaixd2ul.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.add] # Source node to ATen node mapping: # out => add # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {}) # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_tensor_1, %add_tensor), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, 
XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 5 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x2), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = tmp2 + tmp4 tl.store(in_out_ptr0 + (x2), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 10), (10, 1)) assert_size_stride(primals_2, (5, 10), (10, 1)) assert_size_stride(primals_3, (5, ), (1, )) assert_size_stride(primals_4, (4, 10), (10, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 5), (5, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (10, 5), (1, 10), 0), out=buf0) buf1 = empty_strided_cuda((4, 5), (5, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_4, reinterpret_tensor(primals_2, (10, 5), (1, 10), 0), out=buf1) del primals_2 buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(buf2, primals_3, buf1, 20, grid=grid(20), stream=stream0) del buf1 del primals_3 return (buf2, primals_1, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 10), (10, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((5, 10), (10, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((5, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 10), (10, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn as nn from torch import optim as optim class TemplateModel(nn.Module): def __init__(self, mix_data=False): """ Base model for testing. The setting ``mix_data=True`` simulates a wrong implementation. """ super().__init__() self.mix_data = mix_data self.linear = nn.Linear(10, 5) self.input_array = torch.rand(10, 5, 2) def forward(self, *args, **kwargs): return self.forward__standard(*args, **kwargs) def forward__standard(self, x): if self.mix_data: x = x.view(10, -1).permute(1, 0).view(-1, 10) else: x = x.view(-1, 10) return self.linear(x) class MultipleInputModel(TemplateModel): """ Base model for testing verification when forward accepts multiple arguments. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.input_array = torch.rand(10, 5, 2), torch.rand(10, 5, 2) def forward(self, x, y, some_kwarg=True): out = super().forward(x) + super().forward(y) return out def get_inputs(): return [torch.rand([4, 10]), torch.rand([4, 10])] def get_init_inputs(): return [[], {}]
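A short usage sketch (an addition, not from the source repo): the forward sums the linear projections of both inputs, so two (4, 10) batches produce one (4, 5) output.

import torch

model = MultipleInputModel()
x, y = torch.rand([4, 10]), torch.rand([4, 10])
out = model(x, y)           # some_kwarg defaults to True and is unused
assert out.shape == (4, 5)  # linear(x) + linear(y), shapes from get_inputs()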
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn as nn from torch import optim as optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 20 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 5 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp3 + tmp1 tmp5 = tmp2 + tmp4 tl.store(in_out_ptr0 + x2, tmp5, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 10), (10, 1)) assert_size_stride(primals_2, (5, 10), (10, 1)) assert_size_stride(primals_3, (5,), (1,)) assert_size_stride(primals_4, (4, 10), (10, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 5), (5, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (10, 5), (1, 10), 0), out=buf0) buf1 = empty_strided_cuda((4, 5), (5, 1), torch.float32) extern_kernels.mm(primals_4, reinterpret_tensor(primals_2, (10, 5), (1, 10), 0), out=buf1) del primals_2 buf2 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_0[grid(20)](buf2, primals_3, buf1, 20, XBLOCK= 32, num_warps=1, num_stages=1) del buf1 del primals_3 return buf2, primals_1, primals_4 class TemplateModel(nn.Module): def __init__(self, mix_data=False): """ Base model for testing. The setting ``mix_data=True`` simulates a wrong implementation. """ super().__init__() self.mix_data = mix_data self.linear = nn.Linear(10, 5) self.input_array = torch.rand(10, 5, 2) def forward(self, *args, **kwargs): return self.forward__standard(*args, **kwargs) def forward__standard(self, x): if self.mix_data: x = x.view(10, -1).permute(1, 0).view(-1, 10) else: x = x.view(-1, 10) return self.linear(x) class MultipleInputModelNew(TemplateModel): """ Base model for testing verification when forward accepts multiple arguments. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.input_array = torch.rand(10, 5, 2), torch.rand(10, 5, 2) def forward(self, input_0, input_1): primals_2 = self.linear.weight primals_3 = self.linear.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
oke-aditya/pytorch-lightning-bolts
MultipleInputModel
false
7,361
[ "Apache-2.0" ]
1
268df20bb442e7385b709b1488d37fd2767aba3c
https://github.com/oke-aditya/pytorch-lightning-bolts/tree/268df20bb442e7385b709b1488d37fd2767aba3c
FakeRKHSConvNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/yw/cywcz4pxnzyvlsoydzxcj5pzlu3i5g7qgj7guhgyvlrzkngzehmv.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_4/inductor_cache/sb/csbfa4itjbflkqsfneo2c6mi3vnrum7dc2e7l2pstoa2kdz6gtd3.py # Topologically Sorted Source Nodes: [conv2d_2, add, x], Original ATen: [aten.convolution, aten.add, aten._native_batch_norm_legit_no_training, aten.native_batch_norm_backward] # Source node to ATen node mapping: # add => add # conv2d_2 => convolution_2 # x => add_2, mul_1, mul_2, sub # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_2, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %convolution_2), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %unsqueeze_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %unsqueeze_3), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %unsqueeze_5), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %unsqueeze_7), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %unsqueeze_10), kwargs = {}) triton_poi_fused__native_batch_norm_legit_no_training_add_convolution_native_batch_norm_backward_1 = async_compile.triton('triton_poi_fused__native_batch_norm_legit_no_training_add_convolution_native_batch_norm_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__native_batch_norm_legit_no_training_add_convolution_native_batch_norm_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__native_batch_norm_legit_no_training_add_convolution_native_batch_norm_backward_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x3), xmask) tmp2 = 
tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr5 + (x1), xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr6 + (x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.sqrt(tmp9) tmp11 = tl.full([1], 1, tl.int32) tmp12 = tmp11 / tmp10 tmp13 = 1.0 tmp14 = tmp12 * tmp13 tmp15 = tmp6 * tmp14 tmp17 = tmp15 * tmp16 tmp19 = tmp17 + tmp18 tl.store(out_ptr0 + (x3), tmp19, xmask) tl.store(out_ptr1 + (x3), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(buf1, 256, grid=grid(256), stream=stream0) # Topologically Sorted Source Nodes: [h_res], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(primals_2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_2, add, x], Original ATen: [aten.convolution, aten.add, aten._native_batch_norm_legit_no_training, aten.native_batch_norm_backward] triton_poi_fused__native_batch_norm_legit_no_training_add_convolution_native_batch_norm_backward_1.run(buf2, buf3, primals_5, primals_6, primals_7, primals_8, primals_9, buf4, buf5, 256, grid=grid(256), stream=stream0) del buf2 del buf3 del primals_5 del primals_6 del primals_9 return (buf4, primals_1, primals_2, primals_3, primals_4, primals_7, primals_8, buf1, buf5, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = 
rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math
import torch
import numpy as np
from torch import nn as nn
from torch import optim as optim


class MaybeBatchNorm2d(nn.Module):

    def __init__(self, n_ftr, affine, use_bn):
        super(MaybeBatchNorm2d, self).__init__()
        self.bn = nn.BatchNorm2d(n_ftr, affine=affine)
        self.use_bn = use_bn

    def forward(self, x):
        if self.use_bn:
            x = self.bn(x)
        return x


class FakeRKHSConvNet(nn.Module):

    def __init__(self, n_input, n_output, use_bn=False):
        super(FakeRKHSConvNet, self).__init__()
        self.conv1 = nn.Conv2d(n_input, n_output, kernel_size=1, stride=1,
            padding=0, bias=False)
        self.bn1 = MaybeBatchNorm2d(n_output, True, use_bn)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(n_output, n_output, kernel_size=1, stride=1,
            padding=0, bias=False)
        self.bn_out = MaybeBatchNorm2d(n_output, True, True)
        self.shortcut = nn.Conv2d(n_input, n_output, kernel_size=1, stride=1,
            padding=0, bias=True)
        if n_output >= n_input:
            # np.bool was removed in NumPy 1.24; the builtin bool is the
            # portable dtype for the identity mask.
            eye_mask = np.zeros((n_output, n_input, 1, 1), dtype=bool)
            for i in range(n_input):
                eye_mask[i, i, 0, 0] = 1
            self.shortcut.weight.data.uniform_(-0.01, 0.01)
            self.shortcut.weight.data.masked_fill_(torch.tensor(eye_mask), 1.0)

    def init_weights(self, init_scale=1.0):
        nn.init.kaiming_uniform_(self.conv1.weight, a=math.sqrt(5))
        self.conv1.weight.data.mul_(init_scale)
        nn.init.constant_(self.conv2.weight, 0.0)

    def forward(self, x):
        h_res = self.conv2(self.relu1(self.bn1(self.conv1(x))))
        h = self.bn_out(h_res + self.shortcut(x))
        return h


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_input': 4, 'n_output': 4}]
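A sketch of the intent behind the masked initialization (an addition; zeroing the bias is my assumption, since the default bias init would otherwise shift the output): with diagonal entries pinned to 1.0 and off-diagonal weights in [-0.01, 0.01], the shortcut is a near-identity map at init.

import torch

net = FakeRKHSConvNet(n_input=4, n_output=4)
with torch.no_grad():
    net.shortcut.bias.zero_()  # isolate the weight's identity structure
    x = torch.rand(4, 4, 4, 4)
    # Off-diagonal noise contributes at most ~3 * 0.01 per output element here.
    torch.testing.assert_close(net.shortcut(x), x, rtol=0.0, atol=0.05)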
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import math import numpy as np from torch import nn as nn from torch import optim as optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__native_batch_norm_legit_no_training_add_convolution_native_batch_norm_backward_1( in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x3, xmask) tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.sqrt(tmp9) tmp11 = tl.full([1], 1, tl.int32) tmp12 = tmp11 / tmp10 tmp13 = 1.0 tmp14 = tmp12 * tmp13 tmp15 = tmp6 * tmp14 tmp17 = tmp15 * tmp16 tmp19 = tmp17 + tmp18 tl.store(out_ptr0 + x3, tmp19, xmask) tl.store(out_ptr1 + x3, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](buf1, 256, XBLOCK=256, num_warps =4, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = extern_kernels.convolution(primals_2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), 
torch.float32)
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__native_batch_norm_legit_no_training_add_convolution_native_batch_norm_backward_1[
            grid(256)](buf2, buf3, primals_5, primals_6, primals_7,
            primals_8, primals_9, buf4, buf5, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
        del buf2
        del buf3
        del primals_5
        del primals_6
        del primals_9
    return (buf4, primals_1, primals_2, primals_3, primals_4, primals_7,
        primals_8, buf1, buf5)


class MaybeBatchNorm2d(nn.Module):

    def __init__(self, n_ftr, affine, use_bn):
        super(MaybeBatchNorm2d, self).__init__()
        self.bn = nn.BatchNorm2d(n_ftr, affine=affine)
        self.use_bn = use_bn

    def forward(self, x):
        if self.use_bn:
            x = self.bn(x)
        return x


class FakeRKHSConvNetNew(nn.Module):

    def __init__(self, n_input, n_output, use_bn=False):
        super(FakeRKHSConvNetNew, self).__init__()
        self.conv1 = nn.Conv2d(n_input, n_output, kernel_size=1, stride=1,
            padding=0, bias=False)
        self.bn1 = MaybeBatchNorm2d(n_output, True, use_bn)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(n_output, n_output, kernel_size=1, stride=1,
            padding=0, bias=False)
        self.bn_out = MaybeBatchNorm2d(n_output, True, True)
        self.shortcut = nn.Conv2d(n_input, n_output, kernel_size=1, stride=1,
            padding=0, bias=True)
        if n_output >= n_input:
            # np.bool was removed in NumPy 1.24; the builtin bool is the
            # portable dtype for the identity mask.
            eye_mask = np.zeros((n_output, n_input, 1, 1), dtype=bool)
            for i in range(n_input):
                eye_mask[i, i, 0, 0] = 1
            self.shortcut.weight.data.uniform_(-0.01, 0.01)
            self.shortcut.weight.data.masked_fill_(torch.tensor(eye_mask), 1.0)

    def init_weights(self, init_scale=1.0):
        nn.init.kaiming_uniform_(self.conv1.weight, a=math.sqrt(5))
        self.conv1.weight.data.mul_(init_scale)
        nn.init.constant_(self.conv2.weight, 0.0)

    def forward(self, input_0):
        # Bind tensors in the order the compiled graph consumes them: the
        # fused kernel adds primals_5 as the shortcut bias, then normalizes
        # with bn_out's running statistics (primals_6/primals_7) and affine
        # parameters (primals_8/primals_9). bn1 has use_bn=False here, so its
        # parameters never appear in the graph.
        primals_1 = self.conv1.weight
        primals_3 = self.conv2.weight
        primals_4 = self.shortcut.weight
        primals_5 = self.shortcut.bias
        primals_6 = self.bn_out.bn.running_mean
        primals_7 = self.bn_out.bn.running_var
        primals_8 = self.bn_out.bn.weight
        primals_9 = self.bn_out.bn.bias
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
oke-aditya/pytorch-lightning-bolts
FakeRKHSConvNet
false
7,362
[ "Apache-2.0" ]
1
268df20bb442e7385b709b1488d37fd2767aba3c
https://github.com/oke-aditya/pytorch-lightning-bolts/tree/268df20bb442e7385b709b1488d37fd2767aba3c
AgentA2C
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/5y/c5yq7wkgmmcygrawripwacy566sggsmh2mzk5izw35wk7ferohhu.py # Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # h => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex 
% 100 x2 = xindex % 1600 x3 = (xindex // 1600) tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (100, 4), (4, 1)) assert_size_stride(primals_2, (100, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (100, 100), (100, 1)) assert_size_stride(primals_5, (100, ), (1, )) assert_size_stride(primals_6, (4, 100), (100, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (1, 100), (100, 1)) assert_size_stride(primals_9, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 100), (100, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 100), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 100), (1600, 400, 100, 1), 0); del buf0 # reuse buf8 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool) # Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf8, 6400, grid=grid(6400), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 100), (100, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 100), (100, 1), 0), reinterpret_tensor(primals_4, (100, 100), (1, 100), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 100), (1600, 400, 100, 1), 0); del buf2 # reuse buf7 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool) # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf7, 6400, grid=grid(6400), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 100), (100, 1), 0), reinterpret_tensor(primals_6, (100, 4), (1, 100), 0), alpha=1, beta=1, out=buf4) del primals_7 buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [value], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, reinterpret_tensor(buf3, (64, 100), (100, 1), 0), reinterpret_tensor(primals_8, (100, 1), (1, 100), 0), alpha=1, beta=1, out=buf6) del primals_9 return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 100), (100, 1), 0), reinterpret_tensor(buf3, (64, 100), (100, 1), 0), primals_8, primals_6, buf7, primals_4, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((100, 4), (4, 1), device='cuda:0', 
dtype=torch.float32) primals_2 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((100, 100), (100, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 100), (100, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, 100), (100, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class AgentA2C(nn.Module): def __init__(self, state_shape, n_actions): super().__init__() self.name = 'a2c' self.n_actions = n_actions self.state_shape = state_shape self.hidden1 = nn.Linear(self.state_shape, 100) self.act1 = nn.ReLU() self.hidden2 = nn.Linear(100, 100) self.act2 = nn.ReLU() self.out1 = nn.Linear(100, self.n_actions) self.out2 = nn.Linear(100, 1) def forward(self, state_t): h = self.act1(self.hidden1(state_t)) h = self.act2(self.hidden2(h)) logits = self.out1(h) value = self.out2(h) return logits, value def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_shape': 4, 'n_actions': 4}]
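A usage sketch (an addition, not from the repo): turning the actor head's logits into a sampled action and pairing it with the critic's value estimate.

import torch

agent = AgentA2C(state_shape=4, n_actions=4)
state = torch.rand([4, 4, 4, 4])
logits, value = agent(state)
dist = torch.distributions.Categorical(logits=logits)
action = dist.sample()                          # one action per leading index
assert action.shape == logits.shape[:-1]        # (4, 4, 4)
assert value.shape == logits.shape[:-1] + (1,)  # scalar value per state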
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 100 x2 = xindex % 1600 x3 = xindex // 1600 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (100, 4), (4, 1)) assert_size_stride(primals_2, (100,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (100, 100), (100, 1)) assert_size_stride(primals_5, (100,), (1,)) assert_size_stride(primals_6, (4, 100), (100, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (1, 100), (100, 1)) assert_size_stride(primals_9, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 100), (100, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 100), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 100), (1600, 400, 100, 1), 0) del buf0 buf8 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(6400)](buf1, primals_2, buf8, 6400, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 100), (100, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 100), (100, 1), 0), reinterpret_tensor(primals_4, (100, 100), (1, 100), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 100), (1600, 400, 100, 1), 0) del buf2 buf7 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(6400)](buf3, primals_5, buf7, 6400, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 100), (100, 1), 0), reinterpret_tensor(primals_6, (100, 4), (1, 100), 0), alpha=1, beta=1, out=buf4) del primals_7 buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf3, (64, 100), (100, 1), 0), reinterpret_tensor(primals_8, (100, 1), (1, 100), 0), alpha=1, beta=1, out=buf6) del primals_9 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 100), (100, 1), 0 ), reinterpret_tensor(buf3, (64, 100), (100, 1), 0 ), primals_8, primals_6, buf7, 
primals_4, buf8 class AgentA2CNew(nn.Module): def __init__(self, state_shape, n_actions): super().__init__() self.name = 'a2c' self.n_actions = n_actions self.state_shape = state_shape self.hidden1 = nn.Linear(self.state_shape, 100) self.act1 = nn.ReLU() self.hidden2 = nn.Linear(100, 100) self.act2 = nn.ReLU() self.out1 = nn.Linear(100, self.n_actions) self.out2 = nn.Linear(100, 1) def forward(self, input_0): primals_1 = self.hidden1.weight primals_2 = self.hidden1.bias primals_4 = self.hidden2.weight primals_5 = self.hidden2.bias primals_6 = self.out1.weight primals_7 = self.out1.bias primals_8 = self.out2.weight primals_9 = self.out2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
onimaru/Reinforcement_Learning
AgentA2C
false
7,363
[ "MIT" ]
1
4c45b51a095cb0cb3c18f6a1542befdcab8a58a4
https://github.com/onimaru/Reinforcement_Learning/tree/4c45b51a095cb0cb3c18f6a1542befdcab8a58a4
VishalNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/xv/cxvkno5nugllag3heuqgb6cqyvtevlsbixrlypuea2jsl3cwc6oh.py # Topologically Sorted Source Nodes: [conv1d, out1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # conv1d => convolution # out1 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1], [40], [1], False, [0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16384], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 15360 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 64) % 60 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/2l/c2lu4xsiikkvhigt7gu2gt5rl2ljj4ngousqsy2ox2lxlphzqyya.py # Topologically Sorted Source Nodes: [out2], Original ATen: [aten.convolution] # Source node to ATen node mapping: # out2 => convolution_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1], [150], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (60, 1, 81), (81, 81, 1)) assert_size_stride(primals_2, (60, ), (1, )) assert_size_stride(primals_3, (4, 1, 64), (64, 64, 1)) assert_size_stride(primals_4, (1, 60, 301), (18060, 301, 1)) assert_size_stride(primals_5, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(40,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 60, 64), (3840, 64, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv1d, out1], Original 
ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 15360, grid=grid(15360), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [out2], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1,), padding=(150,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 64), (64, 64, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [out2], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf3, primals_5, 256, grid=grid(256), stream=stream0) del primals_5 return (buf3, primals_1, primals_3, primals_4, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((60, 1, 81), (81, 81, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((60, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64), (64, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 60, 301), (18060, 301, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class VishalNet(nn.Module): def __init__(self): super(VishalNet, self).__init__() self.cnn1 = nn.Conv1d(1, 60, 81, 1, 40) self.cnn2 = nn.Conv1d(60, 1, 301, 1, 150) def forward(self, input): out1 = nn.functional.relu(self.cnn1(input)) out2 = self.cnn2(out1) return out2 def get_inputs(): return [torch.rand([4, 1, 64])] def get_init_inputs(): return [[], {}]
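A shape check (an addition): both convolutions use "same"-style padding at stride 1, since L_out = L_in + 2*padding - kernel + 1, i.e. 64 + 80 - 81 + 1 = 64 for cnn1 and 64 + 300 - 301 + 1 = 64 for cnn2.

import torch

net = VishalNet()
x = torch.rand([4, 1, 64])
assert net(x).shape == (4, 1, 64)  # length preserved through both conv layers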
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 15360 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 64 % 60 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (60, 1, 81), (81, 81, 1)) assert_size_stride(primals_2, (60,), (1,)) assert_size_stride(primals_3, (4, 1, 64), (64, 64, 1)) assert_size_stride(primals_4, (1, 60, 301), (18060, 301, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(40,), dilation=(1,), transposed=False, output_padding= (0,), groups=1, bias=None) assert_size_stride(buf0, (4, 60, 64), (3840, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(15360)](buf1, primals_2, 15360, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1,), padding=(150,), dilation=(1,), transposed=False, output_padding =(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 64), (64, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(256)](buf3, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1 class VishalNetNew(nn.Module): def __init__(self): super(VishalNetNew, self).__init__() self.cnn1 = nn.Conv1d(1, 60, 81, 1, 40) self.cnn2 = nn.Conv1d(60, 1, 301, 1, 150) def forward(self, input_0): primals_1 = self.cnn1.weight primals_2 = self.cnn1.bias primals_4 = self.cnn2.weight primals_5 = self.cnn2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
olivesgatech/Geophysics-2021-Joint-learning-for-spatial-context-based-inversion
VishalNet
false
7,364
[ "MIT" ]
1
56f506dfe62ac3557febb4c8e3c62542b1624a1b
https://github.com/olivesgatech/Geophysics-2021-Joint-learning-for-spatial-context-based-inversion/tree/56f506dfe62ac3557febb4c8e3c62542b1624a1b
L2Norm
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/jl/cjlxnxxaiarviom2nt4mvz43fs6igit2tnfokz3h6ianosaulkoi.py # Topologically Sorted Source Nodes: [norm, norm_1, truediv], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div] # Source node to ATen node mapping: # norm => pow_1, pow_2, sum_1 # norm_1 => clamp_min # truediv => div # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-06), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %clamp_min), kwargs = {}) triton_poi_fused_clamp_div_linalg_vector_norm_0 = async_compile.triton('triton_poi_fused_clamp_div_linalg_vector_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 
'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-06 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x3), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [norm, norm_1, truediv], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_clamp_div_linalg_vector_norm_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class L2Norm(nn.Module): def forward(self, x, eps=1e-06): norm = x.norm(dim=1, keepdim=True).clamp(min=eps) return x / norm def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
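A minimal usage sketch for L2Norm (assuming inputs whose norms exceed eps, which holds almost surely for torch.rand): every vector along dim=1 comes out unit-normalized.

import torch

m = L2Norm()
x = torch.rand(4, 4, 4, 4)
out = m(x)
# each length-4 vector along dim=1 now has (approximately) unit L2 norm
assert torch.allclose(out.norm(dim=1), torch.ones(4, 4, 4), atol=1e-5)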
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-06 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_div_linalg_vector_norm_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class L2NormNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
onlyrico/vit-pytorch
L2Norm
false
7,365
[ "MIT" ]
1
e52ac4195550faa9c3372533d325bf649f7354ad
https://github.com/onlyrico/vit-pytorch/tree/e52ac4195550faa9c3372533d325bf649f7354ad
AgentReinforce
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/5y/c5yq7wkgmmcygrawripwacy566sggsmh2mzk5izw35wk7ferohhu.py # Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # h => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex 
% 100 x2 = xindex % 1600 x3 = (xindex // 1600) tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x4), tmp4, xmask) tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (100, 4), (4, 1)) assert_size_stride(primals_2, (100, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (100, 100), (100, 1)) assert_size_stride(primals_5, (100, ), (1, )) assert_size_stride(primals_6, (4, 100), (100, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 100), (100, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 100), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 100), (1600, 400, 100, 1), 0); del buf0 # reuse buf6 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool) # Topologically Sorted Source Nodes: [h], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 6400, grid=grid(6400), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 100), (100, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 100), (100, 1), 0), reinterpret_tensor(primals_4, (100, 100), (1, 100), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 100), (1600, 400, 100, 1), 0); del buf2 # reuse buf5 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool) # Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf5, 6400, grid=grid(6400), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 100), (100, 1), 0), reinterpret_tensor(primals_6, (100, 4), (1, 100), 0), alpha=1, beta=1, out=buf4) del primals_7 return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 100), (100, 1), 0), reinterpret_tensor(buf3, (64, 100), (100, 1), 0), primals_6, buf5, primals_4, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((100, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((100, 100), (100, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 100), (100, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', 
dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn class AgentReinforce(nn.Module): def __init__(self, state_shape, n_actions): super().__init__() self.name = 'reinforce' self.n_actions = n_actions self.state_shape = state_shape self.hidden1 = nn.Linear(self.state_shape, 100) self.act1 = nn.ReLU() self.hidden2 = nn.Linear(100, 100) self.act2 = nn.ReLU() self.out1 = nn.Linear(100, self.n_actions) def forward(self, state_t): h = self.act1(self.hidden1(state_t)) h = self.act2(self.hidden2(h)) logits = self.out1(h) return logits def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_shape': 4, 'n_actions': 4}]
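A hedged usage sketch for the policy network above: the logits feed a categorical distribution from which a REINFORCE agent samples actions (the sampling step is an assumed usage, not code from the repo).

import torch

agent = AgentReinforce(state_shape=4, n_actions=4)
states = torch.rand(8, 4)                  # a batch of 8 flat states
logits = agent(states)                     # (8, 4)
probs = torch.softmax(logits, dim=-1)
actions = torch.multinomial(probs, num_samples=1).squeeze(-1)  # one action per state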
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 100 x2 = xindex % 1600 x3 = xindex // 1600 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x4, tmp4, xmask) tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (100, 4), (4, 1)) assert_size_stride(primals_2, (100,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (100, 100), (100, 1)) assert_size_stride(primals_5, (100,), (1,)) assert_size_stride(primals_6, (4, 100), (100, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 100), (100, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 100), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 100), (1600, 400, 100, 1), 0) del buf0 buf6 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(6400)](buf1, primals_2, buf6, 6400, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 100), (100, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 100), (100, 1), 0), reinterpret_tensor(primals_4, (100, 100), (1, 100), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 100), (1600, 400, 100, 1), 0) del buf2 buf5 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(6400)](buf3, primals_5, buf5, 6400, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 100), (100, 1), 0), reinterpret_tensor(primals_6, (100, 4), (1, 100), 0), alpha=1, beta=1, out=buf4) del primals_7 return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 100), (100, 1), 0 ), reinterpret_tensor(buf3, (64, 100), (100, 1), 0 ), primals_6, buf5, primals_4, buf6 class AgentReinforceNew(nn.Module): def __init__(self, state_shape, n_actions): super().__init__() self.name = 'reinforce' self.n_actions = n_actions self.state_shape = state_shape self.hidden1 = nn.Linear(self.state_shape, 100) self.act1 = nn.ReLU() self.hidden2 = nn.Linear(100, 100) self.act2 = nn.ReLU() self.out1 = nn.Linear(100, self.n_actions) def forward(self, input_0): primals_1 = 
self.hidden1.weight primals_2 = self.hidden1.bias primals_4 = self.hidden2.weight primals_5 = self.hidden2.bias primals_6 = self.out1.weight primals_7 = self.out1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
onimaru/Reinforcement_Learning
AgentReinforce
false
7,366
[ "MIT" ]
1
4c45b51a095cb0cb3c18f6a1542befdcab8a58a4
https://github.com/onimaru/Reinforcement_Learning/tree/4c45b51a095cb0cb3c18f6a1542befdcab8a58a4
AmdimNCELoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/lo/clogbilbksb5pe5z7x7zzgxgebeyhflxrjymvbwjeoos6el7ofav.py # Topologically Sorted Source Nodes: [raw_scores_2, mul_1, tanh, x_clip, max_1, mul_3, pos_scores, pos_shiftexp, sub_3, exp_1, sub_2, exp, mul_6, neg_sumexp, add, all_logsumexp, nce_scores, mean_1, nce_scores_1], Original ATen: [aten.div, aten.mul, aten.tanh, aten.max, aten.sum, aten.sub, aten.exp, aten.add, aten.log, aten.mean, aten.neg] # Source node to ATen node mapping: # add => add # all_logsumexp => log # exp => exp # exp_1 => exp_1 # max_1 => max_1 # mean_1 => mean_1 # mul_1 => mul_1 # mul_3 => mul_3 # mul_6 => mul_6 # nce_scores => sub_5 # nce_scores_1 => neg # neg_sumexp => sum_2 # pos_scores => sum_1 # pos_shiftexp => sub_4 # raw_scores_2 => div # sub_2 => sub_2 # sub_3 => sub_3 # tanh => tanh # x_clip => mul_2 # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view, 2.0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 0.25), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_1,), kwargs = {}) # %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, 4), kwargs = {}) # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view_1, 1, True), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand, %mul_2), kwargs = {}) # %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_3, [1]), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, %getitem), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, %getitem), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %getitem), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, 
%exp), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_6, [1], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_1, %sum_2), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {}) # %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_4, %log), kwargs = {}) # %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_5,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean_1,), kwargs = {}) triton_per_fused_add_div_exp_log_max_mean_mul_neg_sub_sum_tanh_0 = async_compile.triton('triton_per_fused_add_div_exp_log_max_mean_mul_neg_sub_sum_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 4], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_exp_log_max_mean_mul_neg_sub_sum_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_div_exp_log_max_mean_mul_neg_sub_sum_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last') tmp36 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp38 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = 0.5 tmp5 = tmp3 * tmp4 tmp6 = 0.25 tmp7 = tmp5 * tmp6 tmp8 = libdevice.tanh(tmp7) tmp9 = 4.0 tmp10 = tmp8 * tmp9 tmp11 = tmp2 * tmp10 tmp12 = tmp0 * 
tmp9 tmp13 = tmp11 - tmp12 tmp15 = tmp1 - tmp14 tmp17 = tmp16 * tmp4 tmp18 = tmp17 * tmp6 tmp19 = libdevice.tanh(tmp18) tmp20 = tmp19 * tmp9 tmp21 = tmp15 * tmp20 tmp22 = tmp14 * tmp9 tmp23 = tmp21 - tmp22 tmp24 = triton_helpers.maximum(tmp13, tmp23) tmp26 = tmp1 - tmp25 tmp28 = tmp27 * tmp4 tmp29 = tmp28 * tmp6 tmp30 = libdevice.tanh(tmp29) tmp31 = tmp30 * tmp9 tmp32 = tmp26 * tmp31 tmp33 = tmp25 * tmp9 tmp34 = tmp32 - tmp33 tmp35 = triton_helpers.maximum(tmp24, tmp34) tmp37 = tmp1 - tmp36 tmp39 = tmp38 * tmp4 tmp40 = tmp39 * tmp6 tmp41 = libdevice.tanh(tmp40) tmp42 = tmp41 * tmp9 tmp43 = tmp37 * tmp42 tmp44 = tmp36 * tmp9 tmp45 = tmp43 - tmp44 tmp46 = triton_helpers.maximum(tmp35, tmp45) tmp47 = tmp13 - tmp46 tmp48 = tl_math.exp(tmp47) tmp49 = tmp2 * tmp48 tmp50 = tmp23 - tmp46 tmp51 = tl_math.exp(tmp50) tmp52 = tmp15 * tmp51 tmp53 = tmp49 + tmp52 tmp54 = tmp34 - tmp46 tmp55 = tl_math.exp(tmp54) tmp56 = tmp26 * tmp55 tmp57 = tmp53 + tmp56 tmp58 = tmp45 - tmp46 tmp59 = tl_math.exp(tmp58) tmp60 = tmp37 * tmp59 tmp61 = tmp57 + tmp60 tmp62 = tmp0 * tmp10 tmp63 = tmp14 * tmp20 tmp64 = tmp62 + tmp63 tmp65 = tmp25 * tmp31 tmp66 = tmp64 + tmp65 tmp67 = tmp36 * tmp42 tmp68 = tmp66 + tmp67 tmp69 = tmp68 - tmp46 tmp70 = tl_math.exp(tmp69) tmp71 = tmp70 + tmp61 tmp72 = tl_math.log(tmp71) tmp73 = tmp69 - tmp72 tmp74 = tl.broadcast_to(tmp73, [XBLOCK, RBLOCK]) tmp76 = tl.sum(tmp74, 1)[:, None] tmp77 = tmp76 / tmp9 tmp78 = -tmp77 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp78, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/i7/ci7wfxaextt4cz2wvuqp24v6te3ikcbwiocamjs25ysoohh6nywz.py # Topologically Sorted Source Nodes: [raw_scores_2, pow_1, mean, lgt_reg], Original ATen: [aten.div, aten.pow, aten.mean, aten.mul] # Source node to ATen node mapping: # lgt_reg => mul # mean => mean # pow_1 => pow_1 # raw_scores_2 => div # Graph fragment: # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view, 2.0), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%div, 2.0), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 0.05), kwargs = {}) triton_per_fused_div_mean_mul_pow_1 = async_compile.triton('triton_per_fused_div_mean_mul_pow_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mean_mul_pow_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_mean_mul_pow_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.sum(tmp4, 1)[:, None] tmp7 = 16.0 tmp8 = tmp6 / tmp7 tmp9 = 0.05 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp10, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm] extern_kernels.mm(arg0_1, arg1_1, out=buf0) del arg0_1 del arg1_1 buf4 = empty_strided_cuda((), (), torch.float32) buf6 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [raw_scores_2, mul_1, tanh, x_clip, max_1, mul_3, pos_scores, pos_shiftexp, sub_3, exp_1, sub_2, exp, mul_6, neg_sumexp, add, all_logsumexp, nce_scores, mean_1, nce_scores_1], Original ATen: [aten.div, aten.mul, aten.tanh, aten.max, aten.sum, aten.sub, aten.exp, aten.add, aten.log, aten.mean, aten.neg] stream0 = get_raw_stream(0) triton_per_fused_add_div_exp_log_max_mean_mul_neg_sub_sum_tanh_0.run(buf6, arg2_1, buf0, 1, 4, grid=grid(1), stream=stream0) del arg2_1 buf5 = empty_strided_cuda((), (), torch.float32) buf7 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [raw_scores_2, pow_1, mean, lgt_reg], Original ATen: [aten.div, aten.pow, aten.mean, aten.mul] triton_per_fused_div_mean_mul_pow_1.run(buf7, buf0, 1, 16, grid=grid(1), stream=stream0) del buf0 return (buf6, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn as nn from torch import optim as optim def tanh_clip(x, clip_val=10.0): """ soft clip values to the range [-clip_val, +clip_val] """ if clip_val is not None: x_clip = clip_val * torch.tanh(1.0 / clip_val * x) else: x_clip = x return x_clip class AmdimNCELoss(nn.Module): """ Compute the NCE scores for predicting r_src->r_trg. """ def __init__(self, tclip): super().__init__() self.tclip = tclip def forward(self, anchor_representations, positive_representations, mask_mat): """ Args: anchor_representations: (batch_size, emb_dim) positive_representations: (emb_dim, n_batch * w* h) (ie: nb_feat_vectors x embedding_dim) mask_mat: (n_batch_gpu, n_batch) Output: raw_scores: (n_batch_gpu, n_locs) nce_scores: (n_batch_gpu, n_locs) lgt_reg : scalar """ r_src = anchor_representations r_trg = positive_representations batch_size, emb_dim = r_src.size() nb_feat_vectors = r_trg.size(1) // batch_size mask_pos = mask_mat.unsqueeze(dim=2).expand(-1, -1, nb_feat_vectors ).float() mask_neg = 1.0 - mask_pos raw_scores = torch.mm(r_src, r_trg).float() raw_scores = raw_scores.reshape(batch_size, batch_size, nb_feat_vectors ) raw_scores = raw_scores / emb_dim ** 0.5 lgt_reg = 0.05 * (raw_scores ** 2.0).mean() raw_scores = tanh_clip(raw_scores, clip_val=self.tclip) """ pos_scores includes scores for all the positive samples neg_scores includes scores for all the negative samples, with scores for positive samples set to the min score (-self.tclip here) """ pos_scores = (mask_pos * raw_scores).sum(dim=1) neg_scores = mask_neg * raw_scores - self.tclip * mask_pos neg_scores = neg_scores.reshape(batch_size, -1) mask_neg = mask_neg.reshape(batch_size, -1) neg_maxes = torch.max(neg_scores, dim=1, keepdim=True)[0] neg_sumexp = (mask_neg * torch.exp(neg_scores - neg_maxes)).sum(dim =1, keepdim=True) all_logsumexp = torch.log(torch.exp(pos_scores - neg_maxes) + neg_sumexp) pos_shiftexp = pos_scores - neg_maxes nce_scores = pos_shiftexp - all_logsumexp nce_scores = -nce_scores.mean() return nce_scores, lgt_reg def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'tclip': 4}]
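An illustrative call for AmdimNCELoss (the mask is chosen here as an identity matrix so each anchor's positive sits on the diagonal; the repo's get_inputs uses random tensors instead):

import torch

loss_fn = AmdimNCELoss(tclip=4)
anchors = torch.rand(4, 4)     # (batch_size, emb_dim)
positives = torch.rand(4, 4)   # (emb_dim, batch_size * nb_feat_vectors)
mask = torch.eye(4)            # (batch_size, batch_size)
nce_scores, lgt_reg = loss_fn(anchors, positives, mask)  # two scalar tensors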
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn as nn from torch import optim as optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_exp_log_max_mean_mul_neg_sub_sum_tanh_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp36 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp38 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = 0.5 tmp5 = tmp3 * tmp4 tmp6 = 0.25 tmp7 = tmp5 * tmp6 tmp8 = libdevice.tanh(tmp7) tmp9 = 4.0 tmp10 = tmp8 * tmp9 tmp11 = tmp2 * tmp10 tmp12 = tmp0 * tmp9 tmp13 = tmp11 - tmp12 tmp15 = tmp1 - tmp14 tmp17 = tmp16 * tmp4 tmp18 = tmp17 * tmp6 tmp19 = libdevice.tanh(tmp18) tmp20 = tmp19 * tmp9 tmp21 = tmp15 * tmp20 tmp22 = tmp14 * tmp9 tmp23 = tmp21 - tmp22 tmp24 = triton_helpers.maximum(tmp13, tmp23) tmp26 = tmp1 - tmp25 tmp28 = tmp27 * tmp4 tmp29 = tmp28 * tmp6 tmp30 = libdevice.tanh(tmp29) tmp31 = tmp30 * tmp9 tmp32 = tmp26 * tmp31 tmp33 = tmp25 * tmp9 tmp34 = tmp32 - tmp33 tmp35 = triton_helpers.maximum(tmp24, tmp34) tmp37 = tmp1 - tmp36 tmp39 = tmp38 * tmp4 tmp40 = tmp39 * tmp6 tmp41 = libdevice.tanh(tmp40) tmp42 = tmp41 * tmp9 tmp43 = tmp37 * tmp42 tmp44 = tmp36 * tmp9 tmp45 = tmp43 - tmp44 tmp46 = triton_helpers.maximum(tmp35, tmp45) tmp47 = tmp13 - tmp46 tmp48 = tl_math.exp(tmp47) tmp49 = tmp2 * tmp48 tmp50 = tmp23 - tmp46 tmp51 = tl_math.exp(tmp50) tmp52 = tmp15 * tmp51 tmp53 = tmp49 + tmp52 tmp54 = tmp34 - tmp46 tmp55 = tl_math.exp(tmp54) tmp56 = tmp26 * tmp55 tmp57 = tmp53 + tmp56 tmp58 = tmp45 - tmp46 tmp59 = tl_math.exp(tmp58) tmp60 = tmp37 * tmp59 tmp61 = tmp57 + tmp60 tmp62 = tmp0 * tmp10 tmp63 = tmp14 * tmp20 tmp64 = tmp62 + tmp63 tmp65 = tmp25 * tmp31 tmp66 = tmp64 + tmp65 tmp67 = tmp36 * tmp42 tmp68 = tmp66 + tmp67 tmp69 = tmp68 - tmp46 tmp70 = tl_math.exp(tmp69) tmp71 = tmp70 + tmp61 tmp72 = tl_math.log(tmp71) tmp73 = tmp69 - tmp72 tmp74 = tl.broadcast_to(tmp73, [XBLOCK, RBLOCK]) tmp76 = tl.sum(tmp74, 1)[:, None] tmp77 = tmp76 / tmp9 tmp78 = -tmp77 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp78, None) @triton.jit def triton_per_fused_div_mean_mul_pow_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] 
tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.sum(tmp4, 1)[:, None] tmp7 = 16.0 tmp8 = tmp6 / tmp7 tmp9 = 0.05 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(arg0_1, arg1_1, out=buf0) del arg0_1 del arg1_1 buf4 = empty_strided_cuda((), (), torch.float32) buf6 = buf4 del buf4 get_raw_stream(0) triton_per_fused_add_div_exp_log_max_mean_mul_neg_sub_sum_tanh_0[grid (1)](buf6, arg2_1, buf0, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del arg2_1 buf5 = empty_strided_cuda((), (), torch.float32) buf7 = buf5 del buf5 triton_per_fused_div_mean_mul_pow_1[grid(1)](buf7, buf0, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf0 return buf6, buf7 def tanh_clip(x, clip_val=10.0): """ soft clip values to the range [-clip_val, +clip_val] """ if clip_val is not None: x_clip = clip_val * torch.tanh(1.0 / clip_val * x) else: x_clip = x return x_clip class AmdimNCELossNew(nn.Module): """ Compute the NCE scores for predicting r_src->r_trg. """ def __init__(self, tclip): super().__init__() self.tclip = tclip def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1]
oke-aditya/pytorch-lightning-bolts
AmdimNCELoss
false
7,367
[ "Apache-2.0" ]
1
268df20bb442e7385b709b1488d37fd2767aba3c
https://github.com/oke-aditya/pytorch-lightning-bolts/tree/268df20bb442e7385b709b1488d37fd2767aba3c
ConditionTime
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/2p/c2petxwjleg3lstqj5dww33hsyirxtuvcvvszujuzdtq6anolgyi.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] # Source node to ATen node mapping: # x => cat # Graph fragment: # %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %repeat], 2), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x1 = (xindex // 16) % 8 x0 = xindex % 16 x2 = (xindex // 128) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), 
tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = (-4) + x1 tmp10 = tmp1 == tmp9 tmp11 = 1.0 tmp12 = 0.0 tmp13 = tl.where(tmp10, tmp11, tmp12) tmp14 = tmp13 * tmp11 tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp6, tmp14, tmp15) tmp17 = tl.where(tmp4, tmp5, tmp16) tl.store(out_ptr0 + (x3), tmp17, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 4, 4), (512, 128, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(arg0_1, buf0, 2048, grid=grid(2048), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn as nn def condition_time(x, i=0, size=(12, 16), seq_len=15): """create one hot encoded time image-layers, i in [1, seq_len]""" assert i < seq_len times = torch.eye(seq_len, dtype=x.dtype, device=x.device)[i].unsqueeze(-1 ).unsqueeze(-1) ones = torch.ones(1, *size, dtype=x.dtype, device=x.device) return times * ones class ConditionTime(nn.Module): """Condition Time on a stack of images, adds `horizon` channels to image""" def __init__(self, horizon, ch_dim=2, num_dims=5): super().__init__() self.horizon = horizon self.ch_dim = ch_dim self.num_dims = num_dims def forward(self, x, fstep=0): """x stack of images, fsteps""" if self.num_dims == 5: bs, seq_len, ch, h, w = x.shape ct = condition_time(x, fstep, (h, w), seq_len=self.horizon).repeat( bs, seq_len, 1, 1, 1) else: bs, h, w, ch = x.shape ct = condition_time(x, fstep, (h, w), seq_len=self.horizon).repeat( bs, 1, 1, 1) ct = ct.permute(0, 2, 3, 1) x = torch.cat([x, ct], dim=self.ch_dim) assert x.shape[self.ch_dim] == ch + self.horizon return x def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {'horizon': 4}]
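A short shape sketch for the 5-D path of ConditionTime: horizon one-hot time channels are concatenated along ch_dim=2, so the channel count grows from ch to ch + horizon.

import torch

ct = ConditionTime(horizon=4)
x = torch.rand(4, 4, 4, 4, 4)        # (batch, seq, ch, h, w)
out = ct(x, fstep=0)                 # adds the one-hot layer for time step 0
assert out.shape == (4, 4, 8, 4, 4)  # 4 original + 4 horizon channels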
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = -4 + x1 tmp10 = tmp1 == tmp9 tmp11 = 1.0 tmp12 = 0.0 tmp13 = tl.where(tmp10, tmp11, tmp12) tmp14 = tmp13 * tmp11 tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp6, tmp14, tmp15) tmp17 = tl.where(tmp4, tmp5, tmp16) tl.store(out_ptr0 + x3, tmp17, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 8, 4, 4), (512, 128, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(2048)](arg0_1, buf0, 2048, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, def condition_time(x, i=0, size=(12, 16), seq_len=15): """create one hot encoded time image-layers, i in [1, seq_len]""" assert i < seq_len times = torch.eye(seq_len, dtype=x.dtype, device=x.device)[i].unsqueeze(-1 ).unsqueeze(-1) ones = torch.ones(1, *size, dtype=x.dtype, device=x.device) return times * ones class ConditionTimeNew(nn.Module): """Condition Time on a stack of images, adds `horizon` channels to image""" def __init__(self, horizon, ch_dim=2, num_dims=5): super().__init__() self.horizon = horizon self.ch_dim = ch_dim self.num_dims = num_dims def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
openclimatefix/MetNet
ConditionTime
false
7,368
[ "MIT" ]
1
06eed550e93da6325641958b0d36c15adde1d928
https://github.com/openclimatefix/MetNet/tree/06eed550e93da6325641958b0d36c15adde1d928
_ImpalaBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] # Source node to ATen node mapping: # x => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/o7/co7yez5qzvz5ywybvk4vlzzmoafzrb3epifmaj33nsuv2yxwmypv.py # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.max_pool2d_with_indices, aten.relu] # Source node to ATen node mapping: # x_1 => _low_memory_max_pool2d_with_offsets, getitem_1 # x_2 => relu # Graph fragment: # %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution, [3, 3], [2, 2], [1, 1], [1, 1], False), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem,), kwargs = {}) triton_poi_fused_max_pool2d_with_indices_relu_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 2) % 2 x0 = xindex % 2 x4 = (xindex // 2) x3 = xindex tmp0 = (-1) + (2*x1) tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = (-1) + (2*x0) tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + ((-5) + (2*x0) + (8*x4)), tmp10 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp12 = 2*x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + ((-4) + (2*x0) + (8*x4)), tmp16 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + (2*x0) tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + ((-3) + (2*x0) + 
(8*x4)), tmp23 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = 2*x1 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + ((-1) + (2*x0) + (8*x4)), tmp30 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + ((2*x0) + (8*x4)), tmp33 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp35 = triton_helpers.maximum(tmp34, tmp32) tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x4)), tmp36 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp38 = triton_helpers.maximum(tmp37, tmp35) tmp39 = 1 + (2*x1) tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 & tmp9 tmp44 = tl.load(in_ptr0 + (3 + (2*x0) + (8*x4)), tmp43 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp45 = triton_helpers.maximum(tmp44, tmp38) tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x4)), tmp46 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x4)), tmp49 & xmask, eviction_policy='evict_last', other=float("-inf")) tmp51 = triton_helpers.maximum(tmp50, tmp48) tmp52 = tmp17 > tmp11 tmp53 = tl.full([1], 1, tl.int8) tmp54 = tl.full([1], 0, tl.int8) tmp55 = tl.where(tmp52, tmp53, tmp54) tmp56 = tmp24 > tmp18 tmp57 = tl.full([1], 2, tl.int8) tmp58 = tl.where(tmp56, tmp57, tmp55) tmp59 = tmp31 > tmp25 tmp60 = tl.full([1], 3, tl.int8) tmp61 = tl.where(tmp59, tmp60, tmp58) tmp62 = tmp34 > tmp32 tmp63 = tl.full([1], 4, tl.int8) tmp64 = tl.where(tmp62, tmp63, tmp61) tmp65 = tmp37 > tmp35 tmp66 = tl.full([1], 5, tl.int8) tmp67 = tl.where(tmp65, tmp66, tmp64) tmp68 = tmp44 > tmp38 tmp69 = tl.full([1], 6, tl.int8) tmp70 = tl.where(tmp68, tmp69, tmp67) tmp71 = tmp47 > tmp45 tmp72 = tl.full([1], 7, tl.int8) tmp73 = tl.where(tmp71, tmp72, tmp70) tmp74 = tmp50 > tmp48 tmp75 = tl.full([1], 8, tl.int8) tmp76 = tl.where(tmp74, tmp75, tmp73) tmp77 = tl.full([1], 0, tl.int32) tmp78 = triton_helpers.maximum(tmp77, tmp51) tl.store(out_ptr0 + (x3), tmp51, xmask) tl.store(out_ptr1 + (x3), tmp76, xmask) tl.store(out_ptr2 + (x3), tmp78, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/qv/cqvxbxuijiou474kzt3mgl6qbvm5mxnfuihqzhmr6kuqzjxfuhb4.py # Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # x_3 => convolution_1 # x_4 => relu_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {}) triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 
'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/bu/cbu6uxh3aqhltx43xtnaawqn5gfieybp3xlbr52mddgu2iyazdwt.py # Topologically Sorted Source Nodes: [x_5, x_6, x_7], Original ATen: [aten.convolution, aten.add, aten.relu] # Source node to ATen node mapping: # x_5 => convolution_2 # x_6 => add # x_7 => relu_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_2, %getitem), kwargs = {}) # %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {}) triton_poi_fused_add_convolution_relu_3 = async_compile.triton('triton_poi_fused_add_convolution_relu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': 
True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_relu_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x3), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + (x3), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/xg/cxgrg2abpwtygxb5gswaupodpqah65wppykemsbre6vd3xobmjvc.py # Topologically Sorted Source Nodes: [x_5, x_6, x_10, x_11], Original ATen: [aten.convolution, aten.add] # Source node to ATen node mapping: # x_10 => convolution_4 # x_11 => add_1 # x_5 => convolution_2 # x_6 => add # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_2, %getitem), kwargs = {}) # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_4, %add), kwargs = {}) triton_poi_fused_add_convolution_4 = async_compile.triton('triton_poi_fused_add_convolution_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = 
tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x3), xmask) tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (x3), xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp2 + tmp7 tl.store(in_out_ptr0 + (x3), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_9, (4, ), (1, )) assert_size_stride(primals_10, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_11, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.int8) buf4 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.max_pool2d_with_indices, aten.relu] triton_poi_fused_max_pool2d_with_indices_relu_1.run(buf1, buf2, buf3, buf4, 64, grid=grid(64), stream=stream0) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution] buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 2, 2), (16, 4, 2, 1)) buf6 = buf5; del buf5 # reuse # Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf6, primals_5, 64, grid=grid(64), stream=stream0) del primals_5 # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 2, 2), (16, 4, 2, 1)) buf8 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5, x_6, x_7], Original ATen: [aten.convolution, aten.add, aten.relu] triton_poi_fused_add_convolution_relu_3.run(buf7, primals_7, buf2, buf8, 64, grid=grid(64), stream=stream0) # Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution] buf9 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, 
(4, 4, 2, 2), (16, 4, 2, 1)) buf10 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.relu] triton_poi_fused_convolution_relu_2.run(buf10, primals_9, 64, grid=grid(64), stream=stream0) del primals_9 # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution] buf11 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 4, 2, 2), (16, 4, 2, 1)) buf12 = buf11; del buf11 # reuse # Topologically Sorted Source Nodes: [x_5, x_6, x_10, x_11], Original ATen: [aten.convolution, aten.add] triton_poi_fused_add_convolution_4.run(buf12, primals_11, buf7, primals_7, buf2, 64, grid=grid(64), stream=stream0) del buf2 del buf7 del primals_11 del primals_7 return (buf12, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf4, buf6, buf8, buf10, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
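A hedged eager analogue of the fused pooling kernel above: triton_poi_fused_max_pool2d_with_indices_relu_1 emits the pooled values (buf2), window-local argmax offsets stored as int8 (buf3, a position 0..8 inside each 3x3 window rather than PyTorch's global int64 indices), and the ReLU of the pooled values (buf4). The sketch below reproduces the first and third outputs with stock PyTorch; the index encoding is the one piece that differs.

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
# same pooling geometry as the kernel: 3x3 window, stride 2, padding 1
vals, global_idx = F.max_pool2d(x, kernel_size=3, stride=2, padding=1,
                                return_indices=True)
relu_vals = vals.relu()  # matches buf4 = relu(maxpool(conv1 output))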
import torch from torch import nn class _ImpalaResBlock(nn.Module): def __init__(self, n_channels: 'int'): super().__init__() self.n_channels = n_channels kernel_size = 3 padding = 1 self.relu = nn.ReLU() self.relu_inplace = nn.ReLU() self.conv1 = nn.Conv2d(n_channels, n_channels, kernel_size, padding =padding) self.conv2 = nn.Conv2d(n_channels, n_channels, kernel_size, padding =padding) def forward(self, inputs): x = self.relu(inputs) x = self.conv1(x) x = self.relu_inplace(x) x = self.conv2(x) x += inputs return x class _ImpalaBlock(nn.Module): def __init__(self, n_channels_in: 'int', n_channels_out: 'int'): super().__init__() self.n_channels_in = n_channels_in self.n_channels_out = n_channels_out kernel_size = 3 padding = 1 self.conv1 = nn.Conv2d(n_channels_in, n_channels_out, kernel_size, padding=padding) self.pool = nn.MaxPool2d(kernel_size, stride=2, padding=padding) self.res1 = _ImpalaResBlock(n_channels_out) self.res2 = _ImpalaResBlock(n_channels_out) def forward(self, x): x = self.conv1(x) x = self.pool(x) x = self.res1(x) x = self.res2(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_channels_in': 4, 'n_channels_out': 4}]
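A brief usage sketch of the eager module above; the shapes follow from kernel_size=3 with padding=1 (spatially preserving convs) and the stride-2 max pool, matching get_inputs()/get_init_inputs():

import torch

block = _ImpalaBlock(n_channels_in=4, n_channels_out=4)
x = torch.rand(4, 4, 4, 4)   # (N, C_in, H, W)
y = block(x)
print(y.shape)               # torch.Size([4, 4, 2, 2]): only the pool halves H and W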
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 2 % 2 x0 = xindex % 2 x4 = xindex // 2 x3 = xindex tmp0 = -1 + 2 * x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + 2 * x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x4), tmp10 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp12 = 2 * x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x4), tmp16 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + 2 * x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + (-3 + 2 * x0 + 8 * x4), tmp23 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = 2 * x1 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x4), tmp30 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + (2 * x0 + 8 * x4), tmp33 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp35 = triton_helpers.maximum(tmp34, tmp32) tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x4), tmp36 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp38 = triton_helpers.maximum(tmp37, tmp35) tmp39 = 1 + 2 * x1 tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 & tmp9 tmp44 = tl.load(in_ptr0 + (3 + 2 * x0 + 8 * x4), tmp43 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp45 = triton_helpers.maximum(tmp44, tmp38) tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x4), tmp46 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x4), tmp49 & xmask, eviction_policy='evict_last', other=float('-inf')) tmp51 = triton_helpers.maximum(tmp50, tmp48) tmp52 = tmp17 > tmp11 tmp53 = tl.full([1], 1, tl.int8) tmp54 = tl.full([1], 0, tl.int8) tmp55 = tl.where(tmp52, tmp53, tmp54) tmp56 = tmp24 > tmp18 tmp57 = tl.full([1], 2, tl.int8) tmp58 = tl.where(tmp56, tmp57, tmp55) tmp59 = tmp31 > tmp25 tmp60 
= tl.full([1], 3, tl.int8) tmp61 = tl.where(tmp59, tmp60, tmp58) tmp62 = tmp34 > tmp32 tmp63 = tl.full([1], 4, tl.int8) tmp64 = tl.where(tmp62, tmp63, tmp61) tmp65 = tmp37 > tmp35 tmp66 = tl.full([1], 5, tl.int8) tmp67 = tl.where(tmp65, tmp66, tmp64) tmp68 = tmp44 > tmp38 tmp69 = tl.full([1], 6, tl.int8) tmp70 = tl.where(tmp68, tmp69, tmp67) tmp71 = tmp47 > tmp45 tmp72 = tl.full([1], 7, tl.int8) tmp73 = tl.where(tmp71, tmp72, tmp70) tmp74 = tmp50 > tmp48 tmp75 = tl.full([1], 8, tl.int8) tmp76 = tl.where(tmp74, tmp75, tmp73) tmp77 = tl.full([1], 0, tl.int32) tmp78 = triton_helpers.maximum(tmp77, tmp51) tl.store(out_ptr0 + x3, tmp51, xmask) tl.store(out_ptr1 + x3, tmp76, xmask) tl.store(out_ptr2 + x3, tmp78, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_add_convolution_relu_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_poi_fused_add_convolution_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + x3, xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp2 + tmp7 tl.store(in_out_ptr0 + x3, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_11, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, 
num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.int8) buf4 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_relu_1[grid(64)](buf1, buf2, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 2, 2), (16, 4, 2, 1)) buf6 = buf5 del buf5 triton_poi_fused_convolution_relu_2[grid(64)](buf6, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf7 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 2, 2), (16, 4, 2, 1)) buf8 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) triton_poi_fused_add_convolution_relu_3[grid(64)](buf7, primals_7, buf2, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 4, 2, 2), (16, 4, 2, 1)) buf10 = buf9 del buf9 triton_poi_fused_convolution_relu_2[grid(64)](buf10, primals_9, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_9 buf11 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 4, 2, 2), (16, 4, 2, 1)) buf12 = buf11 del buf11 triton_poi_fused_add_convolution_4[grid(64)](buf12, primals_11, buf7, primals_7, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 del buf7 del primals_11 del primals_7 return (buf12, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf4, buf6, buf8, buf10) class _ImpalaResBlock(nn.Module): def __init__(self, n_channels: 'int'): super().__init__() self.n_channels = n_channels kernel_size = 3 padding = 1 self.relu = nn.ReLU() self.relu_inplace = nn.ReLU() self.conv1 = nn.Conv2d(n_channels, n_channels, kernel_size, padding =padding) self.conv2 = nn.Conv2d(n_channels, n_channels, kernel_size, padding =padding) def forward(self, inputs): x = self.relu(inputs) x = self.conv1(x) x = self.relu_inplace(x) x = self.conv2(x) x += inputs return x class _ImpalaBlockNew(nn.Module): def __init__(self, n_channels_in: 'int', n_channels_out: 'int'): super().__init__() self.n_channels_in = n_channels_in self.n_channels_out = n_channels_out kernel_size = 3 padding = 1 self.conv1 = nn.Conv2d(n_channels_in, n_channels_out, kernel_size, padding=padding) self.pool = nn.MaxPool2d(kernel_size, stride=2, padding=padding) self.res1 = _ImpalaResBlock(n_channels_out) self.res2 = _ImpalaResBlock(n_channels_out) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.res1.conv1.weight primals_5 = self.res1.conv1.bias primals_6 = self.res1.conv2.weight primals_7 = self.res1.conv2.bias primals_8 = self.res2.conv1.weight primals_9 = self.res2.conv1.bias primals_10 = self.res2.conv2.weight primals_11 = self.res2.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
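A hedged parity sketch between the eager block and the compiled wrapper above (an assumption here is an available CUDA device, since call() pins cuda:0; the parameter names line up because the two classes share the same submodule layout):

import torch

eager = _ImpalaBlock(4, 4).cuda()
compiled = _ImpalaBlockNew(4, 4).cuda()
compiled.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    assert torch.allclose(eager(x), compiled(x), atol=1e-5)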
nrfulton/vsrl-framework
_ImpalaBlock
false
7369
[ "MIT" ]
1
c778824b3285e3e994a4c5846c7b1c2ac03c669b
https://github.com/nrfulton/vsrl-framework/tree/c778824b3285e3e994a4c5846c7b1c2ac03c669b
MILLR
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/hj/chjyxthnzrs73ha46hvtenecvp5ah45h7qgnlfwqncgkfrqqsj65.py # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # sigmoid => sigmoid # Graph fragment: # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {}) triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', 
device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/gt/cgtftm3ylp7qfttvxomzpetwvamrrbiunk5krcwc6yk2fwawxwru.py # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max] # Source node to ATen node mapping: # max_1 => max_1 # Graph fragment: # %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%squeeze, -1), kwargs = {}) triton_poi_fused_max_1 = async_compile.triton('triton_poi_fused_max_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tl.store(out_ptr0 + (x0), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] stream0 = get_raw_stream(0) triton_poi_fused_sigmoid_0.run(buf1, primals_3, 16, grid=grid(16), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max] triton_poi_fused_max_1.run(buf1, buf2, 4, 
grid=grid(4), stream=stream0) return (reinterpret_tensor(buf2, (4, 1), (1, 1), 0), reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
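A hedged eager reference for the compiled MILLR graph above: the extern mm computes the linear layer, triton_poi_fused_sigmoid_0 folds in the bias and applies the sigmoid in place, and triton_poi_fused_max_1 reduces over the 4 time steps. Shapes and the final (N, 1) view mirror the reinterpret_tensor calls in call():

import torch

x = torch.rand(4, 4, 4)                          # (N, T, D)
w, b = torch.rand(1, 4), torch.rand(1)           # fc.weight, fc.bias
pi = torch.sigmoid(x @ w.t() + b).squeeze(-1)    # (N, T) per-step probabilities
p = pi.max(dim=-1).values.view(-1, 1)            # (N, 1) bag-level score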
import torch import numpy as np from torch import nn import torch as tc from sklearn.metrics import * from torch.utils.data import DataLoader from torch.utils.data import WeightedRandomSampler class myDataset(torch.utils.data.Dataset): def __init__(self, x, y): self.x = x self.y = y def __len__(self): return len(self.y) def __getitem__(self, idx): return self.x[idx], self.y[idx] class MILLR(tc.nn.Module): def __init__(self, input_dim, flight_length, device, aggregation= 'maxpool', output_resize=False): super(MILLR, self).__init__() self.fc = nn.Linear(input_dim, 1) self.sigmoid = nn.Sigmoid() self.flight_length = flight_length self.D = input_dim self.device = device self.task = 'binary' self.threshold = 0.5 self.agg = aggregation self.output_resize = output_resize def forward(self, x, train=True): _N, _, _ = x.size() self.pi = self.sigmoid(self.fc(x)).squeeze() self.proba_time = self.pi if self.agg == 'mean': p = tc.mean(self.pi, axis=-1) elif self.agg == 'maxpool': p = tc.max(self.pi, dim=-1)[0] p = p.view(-1, 1) return p def get_feature_importance(self, columns, n_top=5): coeffs = self.fc.weight.flatten().detach().numpy() sorted_feat_idx = np.argsort(coeffs)[::-1] sorted_columns = columns[sorted_feat_idx[:n_top]] top_values = coeffs[sorted_feat_idx[:n_top]] return sorted_columns, top_values def cross_time_steps_loss(self, Pi): diff = (Pi[:, :-1] - Pi[:, 1:]) ** 2 return tc.mean(tc.mean(diff, axis=-1)) def train_LR(self, X_train, y_train, X_val, y_val, batch_size, print_every_epochs=5, l2=0.001, learning_rate=0.001, use_stratified_batch_size=True, verbose=1, num_epochs=100, optimizer='adam', momentum=0.99): self.train() if 'cuda' in self.device: self else: self.cpu() self.batch_size = batch_size criterion = nn.BCELoss() if optimizer == 'adam': optimizer = tc.optim.Adam(self.parameters(), lr=learning_rate, weight_decay=l2) else: optimizer = tc.optim.SGD(self.parameters(), momentum=momentum, lr=learning_rate, weight_decay=l2) hist = np.zeros(num_epochs) val_hist = np.zeros(num_epochs) b_acc = np.zeros(num_epochs) val_b_acc = np.zeros(num_epochs) f1 = np.zeros(num_epochs) val_f1 = np.zeros(num_epochs) if not tc.is_tensor(X_train): X_train = tc.Tensor(X_train) if not tc.is_tensor(y_train): y_train = tc.Tensor(y_train.flatten()) if X_val is not None: if not tc.is_tensor(X_val): X_val = tc.Tensor(X_val) if not tc.is_tensor(y_val): y_val = tc.Tensor(y_val) data_val = myDataset(X_val, y_val) data_train = myDataset(X_train, y_train) if use_stratified_batch_size is False: None dataloader_train = DataLoader(data_train, batch_size=self. batch_size, shuffle=True) else: None weights = [] for label in tc.unique(y_train): count = len(tc.where(y_train == label)[0]) weights.append(1 / count) weights = tc.tensor(weights) samples_weights = weights[y_train.type(tc.LongTensor)] sampler = WeightedRandomSampler(samples_weights, len( samples_weights), replacement=True) dataloader_train = DataLoader(data_train, batch_size=self. batch_size, sampler=sampler) if X_val is not None: dataloader_val = DataLoader(data_val, batch_size=self. batch_size, shuffle=False) try: for epoch in tqdm(range(num_epochs)): batch_acc = [] batch_val_acc = [] batch_f1 = [] batch_val_f1 = [] for iteration, (batch_x, batch_y) in enumerate(dataloader_train ): batch_x, batch_y = batch_x, batch_y if epoch == 0 and iteration == 0: for c in tc.unique(y_train): None outputs = self.forward(batch_x) g_loss = self.cross_time_steps_loss(self.pi) loss = criterion(outputs.flatten(), batch_y.view(-1). 
flatten()) + g_loss hist[epoch] = loss.item() if 'cuda' in self.device: temp_outpouts = (outputs.cpu().detach().numpy() > self.threshold).astype(int) y_batch = batch_y.view(-1).cpu().detach().numpy() b_acc[epoch] = balanced_accuracy_score(y_batch, temp_outpouts) else: temp_outpouts = (outputs.detach().numpy() > self. threshold).astype(int) y_batch = batch_y.view(-1).detach().numpy() b_acc[epoch] = balanced_accuracy_score(y_batch, temp_outpouts) batch_acc.append(b_acc[epoch]) batch_f1.append(f1_score(y_batch, temp_outpouts, average='binary')) optimizer.zero_grad() loss.backward() optimizer.step() if X_val is not None: with tc.no_grad(): mini_loss = [] for batch_X_val, batch_y_val in dataloader_val: batch_X_val, batch_y_val = (batch_X_val, batch_y_val) self.valYhat = self.forward(batch_X_val) g_loss_val = self.cross_time_steps_loss(self.pi ) val_loss = criterion(self.valYhat, batch_y_val.flatten()) + g_loss_val mini_loss.append(val_loss.item()) if self.task == 'binary': if 'cuda' in self.device: temp_out_y = (self.valYhat.cpu().detach ().numpy() > self.threshold).astype(int ) y_val_batch = batch_y_val.view(-1).cpu( ).detach().numpy() val_b_acc[epoch] = balanced_accuracy_score( y_val_batch, temp_out_y) else: temp_out_y = (self.valYhat.detach(). numpy() > self.threshold).astype(int) y_val_batch = batch_y_val.view(-1).detach( ).numpy() val_b_acc[epoch] = balanced_accuracy_score( y_val_batch, temp_out_y) batch_val_acc.append(val_b_acc[epoch]) batch_val_f1.append(f1_score( y_val_batch, temp_out_y, average= 'binary')) val_hist[epoch] = np.mean(mini_loss) if verbose == 1: if self.task == 'binary': if epoch % 10 == 0: None None None if X_val is not None: None None None elif epoch % print_every_epochs == 0: None if self.task == 'binary': b_acc[epoch] = np.mean(batch_acc) val_b_acc[epoch] = np.mean(batch_val_acc) f1[epoch] = np.mean(batch_f1) val_f1[epoch] = np.mean(batch_val_f1) except KeyboardInterrupt: self.cpu() self.device = 'cpu' self.eval() self.x_train = X_train self.x_test = X_val self.hist = hist self.val_hist = val_hist except: raise self.cpu() self.device = 'cpu' self.eval() self.x_train = X_train self.x_test = X_val self.hist = hist self.val_hist = val_hist def fit(self, **kw): self.train_LR(**kw) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'flight_length': 4, 'device': 0}]
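A usage sketch for the eager module above, exercising forward only (device='cpu' is an assumption; forward never reads self.device, and note that train_LR additionally references tqdm, which this file never imports):

import torch

model = MILLR(input_dim=4, flight_length=4, device='cpu')
x = torch.rand(4, 4, 4)      # (N, T, D) bags of T time steps
p = model(x)                 # (4, 1) max-pooled bag probabilities
model.agg = 'mean'
p_mean = model(x)            # same shape, mean aggregation over time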
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np from torch import nn import torch as tc from sklearn.metrics import * from torch.utils.data import DataLoader from torch.utils.data import WeightedRandomSampler assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_max_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_sigmoid_0[grid(16)](buf1, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_max_1[grid(4)](buf1, buf2, 4, XBLOCK=4, num_warps= 1, num_stages=1) return reinterpret_tensor(buf2, (4, 1), (1, 1), 0), reinterpret_tensor(buf1 , (4, 4), (4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), buf1 class myDataset(torch.utils.data.Dataset): def __init__(self, x, y): self.x = x self.y = y def __len__(self): return len(self.y) def __getitem__(self, idx): return self.x[idx], self.y[idx] class MILLRNew(tc.nn.Module): def __init__(self, input_dim, flight_length, device, aggregation= 'maxpool', output_resize=False): super(MILLRNew, self).__init__() self.fc = nn.Linear(input_dim, 1) self.sigmoid = nn.Sigmoid() self.flight_length = flight_length self.D = input_dim self.device = device self.task = 'binary' self.threshold = 0.5 self.agg = aggregation self.output_resize = output_resize def get_feature_importance(self, columns, n_top=5): coeffs = self.fc.weight.flatten().detach().numpy() sorted_feat_idx = np.argsort(coeffs)[::-1] sorted_columns = columns[sorted_feat_idx[:n_top]] top_values = coeffs[sorted_feat_idx[:n_top]] return 
sorted_columns, top_values def cross_time_steps_loss(self, Pi): diff = (Pi[:, :-1] - Pi[:, 1:]) ** 2 return tc.mean(tc.mean(diff, axis=-1)) def train_LR(self, X_train, y_train, X_val, y_val, batch_size, print_every_epochs=5, l2=0.001, learning_rate=0.001, use_stratified_batch_size=True, verbose=1, num_epochs=100, optimizer='adam', momentum=0.99): self.train() if 'cuda' in self.device: self else: self.cpu() self.batch_size = batch_size criterion = nn.BCELoss() if optimizer == 'adam': optimizer = tc.optim.Adam(self.parameters(), lr=learning_rate, weight_decay=l2) else: optimizer = tc.optim.SGD(self.parameters(), momentum=momentum, lr=learning_rate, weight_decay=l2) hist = np.zeros(num_epochs) val_hist = np.zeros(num_epochs) b_acc = np.zeros(num_epochs) val_b_acc = np.zeros(num_epochs) f1 = np.zeros(num_epochs) val_f1 = np.zeros(num_epochs) if not tc.is_tensor(X_train): X_train = tc.Tensor(X_train) if not tc.is_tensor(y_train): y_train = tc.Tensor(y_train.flatten()) if X_val is not None: if not tc.is_tensor(X_val): X_val = tc.Tensor(X_val) if not tc.is_tensor(y_val): y_val = tc.Tensor(y_val) data_val = myDataset(X_val, y_val) data_train = myDataset(X_train, y_train) if use_stratified_batch_size is False: None dataloader_train = DataLoader(data_train, batch_size=self. batch_size, shuffle=True) else: None weights = [] for label in tc.unique(y_train): count = len(tc.where(y_train == label)[0]) weights.append(1 / count) weights = tc.tensor(weights) samples_weights = weights[y_train.type(tc.LongTensor)] sampler = WeightedRandomSampler(samples_weights, len( samples_weights), replacement=True) dataloader_train = DataLoader(data_train, batch_size=self. batch_size, sampler=sampler) if X_val is not None: dataloader_val = DataLoader(data_val, batch_size=self. batch_size, shuffle=False) try: for epoch in tqdm(range(num_epochs)): batch_acc = [] batch_val_acc = [] batch_f1 = [] batch_val_f1 = [] for iteration, (batch_x, batch_y) in enumerate(dataloader_train ): batch_x, batch_y = batch_x, batch_y if epoch == 0 and iteration == 0: for c in tc.unique(y_train): None outputs = self.forward(batch_x) g_loss = self.cross_time_steps_loss(self.pi) loss = criterion(outputs.flatten(), batch_y.view(-1). flatten()) + g_loss hist[epoch] = loss.item() if 'cuda' in self.device: temp_outpouts = (outputs.cpu().detach().numpy() > self.threshold).astype(int) y_batch = batch_y.view(-1).cpu().detach().numpy() b_acc[epoch] = balanced_accuracy_score(y_batch, temp_outpouts) else: temp_outpouts = (outputs.detach().numpy() > self. threshold).astype(int) y_batch = batch_y.view(-1).detach().numpy() b_acc[epoch] = balanced_accuracy_score(y_batch, temp_outpouts) batch_acc.append(b_acc[epoch]) batch_f1.append(f1_score(y_batch, temp_outpouts, average='binary')) optimizer.zero_grad() loss.backward() optimizer.step() if X_val is not None: with tc.no_grad(): mini_loss = [] for batch_X_val, batch_y_val in dataloader_val: batch_X_val, batch_y_val = (batch_X_val, batch_y_val) self.valYhat = self.forward(batch_X_val) g_loss_val = self.cross_time_steps_loss(self.pi ) val_loss = criterion(self.valYhat, batch_y_val.flatten()) + g_loss_val mini_loss.append(val_loss.item()) if self.task == 'binary': if 'cuda' in self.device: temp_out_y = (self.valYhat.cpu().detach ().numpy() > self.threshold).astype(int ) y_val_batch = batch_y_val.view(-1).cpu( ).detach().numpy() val_b_acc[epoch] = balanced_accuracy_score( y_val_batch, temp_out_y) else: temp_out_y = (self.valYhat.detach(). 
numpy() > self.threshold).astype(int) y_val_batch = batch_y_val.view(-1).detach( ).numpy() val_b_acc[epoch] = balanced_accuracy_score( y_val_batch, temp_out_y) batch_val_acc.append(val_b_acc[epoch]) batch_val_f1.append(f1_score( y_val_batch, temp_out_y, average= 'binary')) val_hist[epoch] = np.mean(mini_loss) if verbose == 1: if self.task == 'binary': if epoch % 10 == 0: None None None if X_val is not None: None None None elif epoch % print_every_epochs == 0: None if self.task == 'binary': b_acc[epoch] = np.mean(batch_acc) val_b_acc[epoch] = np.mean(batch_val_acc) f1[epoch] = np.mean(batch_f1) val_f1[epoch] = np.mean(batch_val_f1) except KeyboardInterrupt: self.cpu() self.device = 'cpu' self.eval() self.x_train = X_train self.x_test = X_val self.hist = hist self.val_hist = val_hist except: raise self.cpu() self.device = 'cpu' self.eval() self.x_train = X_train self.x_test = X_val self.hist = hist self.val_hist = val_hist def fit(self, **kw): self.train_LR(**kw) def forward(self, input_0): primals_2 = self.fc.weight primals_3 = self.fc.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
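A hedged parity sketch between MILLR and MILLRNew (assumes a CUDA device because call() pins cuda:0; the device argument is only stored, so any placeholder string works for forward):

import torch

eager = MILLR(4, 4, device='cpu').cuda()
fast = MILLRNew(4, 4, device='cpu').cuda()
fast.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, device='cuda')
with torch.no_grad():
    assert torch.allclose(eager(x), fast(x), atol=1e-5)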
mhbl3/PrecursorAnalysis
MILLR
false
7370
[ "MIT" ]
1
aaa2fe0219ad579b9126fef9cc8594a59ae66815
https://github.com/mhbl3/PrecursorAnalysis/tree/aaa2fe0219ad579b9126fef9cc8594a59ae66815
PEG
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/gu/cguvq7dzmhomcchbxvk4mcgrc7aszgsh5ytmkbdn727ju3aina23.py # Topologically Sorted Source Nodes: [conv2d, add], Original ATen: [aten.convolution, aten.add] # Source node to ATen node mapping: # add => add # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 4), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %primals_3), kwargs = {}) triton_poi_fused_add_convolution_0 = async_compile.triton('triton_poi_fused_add_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * 
XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, add], Original ATen: [aten.convolution, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_convolution_0.run(buf1, primals_2, primals_3, 256, grid=grid(256), stream=stream0) del primals_2 return (buf1, primals_1, primals_3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
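A hedged eager reference for the PEG graph above: the extern kernel runs the depthwise convolution without bias, and triton_poi_fused_add_convolution_0 then adds the per-channel bias and the residual input in a single pass. The sketch checks that this equals the plain biased conv plus residual:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
w = torch.rand(4, 1, 3, 3)   # depthwise: groups == channels == 4
b = torch.rand(4)
no_bias = F.conv2d(x, w, bias=None, padding=1, groups=4)
fused = no_bias + b.view(1, -1, 1, 1) + x     # what the fused kernel adds
ref = F.conv2d(x, w, bias=b, padding=1, groups=4) + x
assert torch.allclose(fused, ref, atol=1e-6)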
import torch from torch import nn class Residual(nn.Module): def __init__(self, fn): super().__init__() self.fn = fn def forward(self, x, **kwargs): return self.fn(x, **kwargs) + x class PEG(nn.Module): def __init__(self, dim, kernel_size=3): super().__init__() self.proj = Residual(nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=kernel_size // 2, groups=dim, stride=1)) def forward(self, x): return self.proj(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
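A brief usage sketch of the eager module above: PEG wraps a depthwise 3x3 convolution in a residual connection, so it is shape-preserving and acts as a conditional positional encoding:

import torch

peg = PEG(dim=4)
x = torch.rand(4, 4, 4, 4)   # (N, C, H, W)
y = peg(x)
assert y.shape == x.shape    # padding = kernel_size // 2 keeps H and W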
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_add_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_convolution_0[grid(256)](buf1, primals_2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class Residual(nn.Module): def __init__(self, fn): super().__init__() self.fn = fn def forward(self, x, **kwargs): return self.fn(x, **kwargs) + x class PEGNew(nn.Module): def __init__(self, dim, kernel_size=3): super().__init__() self.proj = Residual(nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=kernel_size // 2, groups=dim, stride=1)) def forward(self, input_0): primals_1 = self.proj.fn.weight primals_2 = self.proj.fn.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
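A hedged parity sketch between PEG and PEGNew (assumes a CUDA device, since call() pins cuda:0; the state_dict keys match because both route the conv through proj.fn):

import torch

peg = PEG(dim=4).cuda()
peg_new = PEGNew(dim=4).cuda()
peg_new.load_state_dict(peg.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    assert torch.allclose(peg(x), peg_new(x), atol=1e-5)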
onlyrico/vit-pytorch
PEG
false
7371
[ "MIT" ]
1
e52ac4195550faa9c3372533d325bf649f7354ad
https://github.com/onlyrico/vit-pytorch/tree/e52ac4195550faa9c3372533d325bf649f7354ad
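A minimal equivalence sketch for this entry (assumptions, not part of the dataset row: a CUDA device is available and the PEG / PEGNew classes above have been exec'd into scope). The wrapper binds input_0 to primals_3 and reads the depthwise weight and bias from self.proj.fn, so with shared parameters the eager and compiled modules should agree:

import torch

torch.manual_seed(0)
ref = PEG(dim=4).cuda()                 # eager residual depthwise conv
opt = PEGNew(dim=4).cuda()              # Inductor call() under the hood
opt.load_state_dict(ref.state_dict())   # share conv weight and bias

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    assert torch.allclose(ref(x), opt(x), atol=1e-6)

Note that call() hard-codes padding=(1, 1) and groups=4 and asserts a (4, 1, 3, 3) weight, so the compiled path is only valid for dim=4 with the default kernel_size=3.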
SpatialGather_Module
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/iv/civr7hz7pwb7nd5q352sqsjvxezkx6m6jnyztaygkt2ugewh5ejx.py # Topologically Sorted Source Nodes: [probs_1], Original ATen: [aten._softmax] # Source node to ATen node mapping: # probs_1 => div, exp, sum_1 # Graph fragment: # %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, 1), kwargs = {}) # %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {}) # %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {}) # %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 1), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_per_fused__softmax_0 = async_compile.triton('triton_per_fused__softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[16, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 16 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, float("-inf")) tmp6 = triton_helpers.max2(tmp5, 1)[:, None] tmp7 = tmp2 - tmp6 tmp8 = tmp7 * tmp1 tmp9 = tl_math.exp(tmp8) tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.where(xmask, tmp10, 0) tmp13 = tl.sum(tmp12, 1)[:, None] tmp14 = tmp9 / tmp13 tl.store(out_ptr2 + (r1 + (16*x0)), tmp14, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [probs_1], Original ATen: [aten._softmax] stream0 = get_raw_stream(0) triton_per_fused__softmax_0.run(arg0_1, buf2, 16, 16, grid=grid(16), stream=stream0) del arg0_1 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [probs_1, matmul], Original ATen: [aten._softmax, aten.bmm] extern_kernels.bmm(buf2, reinterpret_tensor(arg1_1, (4, 16, 4), (64, 1, 16), 0), out=buf3) del arg1_1 del buf2 return (reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 1, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.nn.functional as F class SpatialGather_Module(nn.Module): """ Aggregate the context features according to the initial predicted probability distribution. Employ the soft-weighted method to aggregate the context. """ def __init__(self, cls_num=0, scale=1): super(SpatialGather_Module, self).__init__() self.cls_num = cls_num self.scale = scale def forward(self, feats, probs, gt_probs=None): batch_size, c, _h, _w = probs.size(0), probs.size(1), probs.size(2), probs.size(3) probs = probs.view(batch_size, c, -1) feats = feats.view(batch_size, feats.size(1), -1) feats = feats.permute(0, 2, 1) probs = F.softmax(self.scale * probs, dim=2) ocr_context = torch.matmul(probs, feats).permute(0, 2, 1).unsqueeze(3) return ocr_context def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, float('-inf')) tmp6 = triton_helpers.max2(tmp5, 1)[:, None] tmp7 = tmp2 - tmp6 tmp8 = tmp7 * tmp1 tmp9 = tl_math.exp(tmp8) tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp12 = tl.where(xmask, tmp10, 0) tmp13 = tl.sum(tmp12, 1)[:, None] tmp14 = tmp9 / tmp13 tl.store(out_ptr2 + (r1 + 16 * x0), tmp14, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) get_raw_stream(0) triton_per_fused__softmax_0[grid(16)](arg0_1, buf2, 16, 16, XBLOCK= 1, num_warps=2, num_stages=1) del arg0_1 buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf2, reinterpret_tensor(arg1_1, (4, 16, 4), (64, 1, 16), 0), out=buf3) del arg1_1 del buf2 return reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 1, 4, 1), 0), class SpatialGather_ModuleNew(nn.Module): """ Aggregate the context features according to the initial predicted probability distribution. Employ the soft-weighted method to aggregate the context. """ def __init__(self, cls_num=0, scale=1): super(SpatialGather_ModuleNew, self).__init__() self.cls_num = cls_num self.scale = scale def forward(self, input_0, input_1): arg0_1 = input_1 arg1_1 = input_0 output = call([arg0_1, arg1_1]) return output[0]
openseg-group/panoptic-deeplab
SpatialGather_Module
false
7372
[ "Apache-2.0" ]
1
818887597e75af77ba32185eb67d8aeac47b54fe
https://github.com/openseg-group/panoptic-deeplab/tree/818887597e75af77ba32185eb67d8aeac47b54fe
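A short check for this entry (assumes CUDA and the classes above in scope). In call(), arg0_1 is the operand that gets softmaxed and arg1_1 is reinterpreted with permuted strides, i.e. arg0_1 plays the role of probs and arg1_1 of feats:

import torch

feats, probs = (t.cuda() for t in get_inputs())  # this entry's helper
ref = SpatialGather_Module()
opt = SpatialGather_ModuleNew()
with torch.no_grad():
    out_ref = ref(feats, probs)
    out_opt = opt(feats, probs)
assert out_ref.shape == out_opt.shape == torch.Size([4, 4, 4, 1])
# holds when the wrapper binds the probability map to arg0_1:
assert torch.allclose(out_ref, out_opt, atol=1e-6)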
Qux
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/bj/cbjesanz52gunefusavp3qpbw4kokz2sx2jddma7ulw6vhss425c.py # Topologically Sorted Source Nodes: [sub, sub_1], Original ATen: [aten.sub] # Source node to ATen node mapping: # sub => sub # sub_1 => sub_1 # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, 4), kwargs = {}) triton_poi_fused_sub_0 = async_compile.triton('triton_poi_fused_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp2 = tmp0 - tmp1 tmp3 
= 4.0 tmp4 = tmp2 - tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, sub_1], Original ATen: [aten.sub] stream0 = get_raw_stream(0) triton_poi_fused_sub_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.jit import torch.onnx import torch.nn class Qux(torch.nn.Module): def __init__(self, x): super(Qux, self).__init__() self.x = x def forward(self, a, b): return a - b - self.x def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'x': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 - tmp1 tmp3 = 4.0 tmp4 = tmp2 - tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sub_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class QuxNew(torch.nn.Module): def __init__(self, x): super(QuxNew, self).__init__() self.x = x def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
opti-mix/glow
Qux
false
7373
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
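A sanity check for this entry (assumes CUDA and the classes above in scope). Note that self.x was constant-folded at trace time: the Triton kernel subtracts the literal 4.0 (tmp3), so QuxNew only reproduces Qux for x=4:

import torch

a, b = (t.cuda() for t in get_inputs())  # this entry's helper
ref = Qux(x=4)
opt = QuxNew(x=4)
assert torch.allclose(ref(a, b), opt(a, b))
# QuxNew(x=5)(a, b) would still subtract 4.0: forward() never reads self.x.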
Discriminator
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/oe/coeqskvavjig2xfptqbr47z2ukzhyagx4dxozc4kzjwb2ykujlu4.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # x_1 => gt, mul, where # Graph fragment: # %add_tensor_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_3, %primals_3), kwargs = {}) # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_3, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_3, 0.2), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_tensor_3, %mul), kwargs = {}) triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 1024 tmp0 = tl.load(in_ptr0 + (x2), None) tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, None) tl.store(out_ptr1 + (x2), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/uo/cuorclbf4h3pyyjppsftkziufkt34vxjph7yux3ly7ujjfqkt7ad.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # x_3 => gt_1, mul_1, where_1 # Graph fragment: # %add_tensor_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_5), kwargs = {}) # %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_2, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_2, 0.2), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %add_tensor_2, %mul_1), kwargs = {}) triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_ptr0 + (x2), None) tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, None) tl.store(out_ptr1 + (x2), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/mf/cmfwsgrlk36ejtu74rx7ipaja6rfirwmwjgqhdqter22spc6ajam.py # 
Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.leaky_relu] # Source node to ATen node mapping: # x_5 => gt_2, mul_2, where_2 # Graph fragment: # %add_tensor_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_7), kwargs = {}) # %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_1, 0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_1, 0.2), kwargs = {}) # %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %add_tensor_1, %mul_2), kwargs = {}) triton_poi_fused_leaky_relu_2 = async_compile.triton('triton_poi_fused_leaky_relu_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + (x2), tmp4, xmask) tl.store(out_ptr1 + (x2), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/rh/crhvyy3w3uejbzndu7qftnyc25sndrfzlmb3i2bzpyadobz7z7bm.py # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] # Source node to ATen node mapping: # sigmoid => sigmoid # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_sigmoid_3 = async_compile.triton('triton_poi_fused_sigmoid_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math 
as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (0)) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (1024, 4), (4, 1)) assert_size_stride(primals_3, (1024, ), (1, )) assert_size_stride(primals_4, (512, 1024), (1024, 1)) assert_size_stride(primals_5, (512, ), (1, )) assert_size_stride(primals_6, (256, 512), (512, 1)) assert_size_stride(primals_7, (256, ), (1, )) assert_size_stride(primals_8, (1, 256), (256, 1)) assert_size_stride(primals_9, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 1024), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 1024), (1024, 1), torch.bool) buf2 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_leaky_relu_0.run(buf0, primals_3, buf1, buf2, 4096, grid=grid(4096), stream=stream0) del buf0 del primals_3 # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.leaky_relu, aten.native_dropout] buf3 = torch.ops.aten.native_dropout.default(buf2, 0.3, True) del buf2 buf4 = buf3[0] buf5 = buf3[1] del buf3 buf6 = empty_strided_cuda((4, 512), (512, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf4, reinterpret_tensor(primals_4, (1024, 512), (1, 1024), 0), out=buf6) buf7 = empty_strided_cuda((4, 512), (512, 1), torch.bool) buf8 = empty_strided_cuda((4, 512), (512, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_1.run(buf6, primals_5, buf7, buf8, 2048, 
grid=grid(2048), stream=stream0) del buf6 del primals_5 # Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.leaky_relu, aten.native_dropout] buf9 = torch.ops.aten.native_dropout.default(buf8, 0.3, True) del buf8 buf10 = buf9[0] buf11 = buf9[1] del buf9 buf12 = empty_strided_cuda((4, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf10, reinterpret_tensor(primals_6, (512, 256), (1, 512), 0), out=buf12) buf13 = empty_strided_cuda((4, 256), (256, 1), torch.bool) buf14 = empty_strided_cuda((4, 256), (256, 1), torch.float32) # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.leaky_relu] triton_poi_fused_leaky_relu_2.run(buf12, primals_7, buf13, buf14, 1024, grid=grid(1024), stream=stream0) del buf12 del primals_7 # Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.leaky_relu, aten.native_dropout] buf15 = torch.ops.aten.native_dropout.default(buf14, 0.3, True) del buf14 buf16 = buf15[0] buf17 = buf15[1] del buf15 buf18 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf16, reinterpret_tensor(primals_8, (256, 1), (1, 256), 0), out=buf18) buf19 = buf18; del buf18 # reuse # Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid] triton_poi_fused_sigmoid_3.run(buf19, primals_9, 4, grid=grid(4), stream=stream0) del primals_9 return (buf19, primals_1, buf1, buf4, buf5, buf7, buf10, buf11, buf13, buf16, buf17, buf19, primals_8, primals_6, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1024, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((512, 1024), (1024, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((256, 512), (512, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, 256), (256, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np from torch import nn as nn from torch.nn import functional as F from torch import optim as optim class Discriminator(nn.Module): def __init__(self, img_shape, hidden_dim=1024): super().__init__() in_dim = int(np.prod(img_shape)) self.fc1 = nn.Linear(in_dim, hidden_dim) self.fc2 = nn.Linear(self.fc1.out_features, self.fc1.out_features // 2) self.fc3 = nn.Linear(self.fc2.out_features, self.fc2.out_features // 2) self.fc4 = nn.Linear(self.fc3.out_features, 1) def forward(self, img): x = img.view(img.size(0), -1) x = F.leaky_relu(self.fc1(x), 0.2) x = F.dropout(x, 0.3) x = F.leaky_relu(self.fc2(x), 0.2) x = F.dropout(x, 0.3) x = F.leaky_relu(self.fc3(x), 0.2) x = F.dropout(x, 0.3) return torch.sigmoid(self.fc4(x)) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'img_shape': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np from torch import nn as nn from torch import optim as optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 1024 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, None) tl.store(out_ptr1 + x2, tmp7, None) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_ptr0 + x2, None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, None) tl.store(out_ptr1 + x2, tmp7, None) @triton.jit def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr1 + x2, tmp7, xmask) @triton.jit def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (1024, 4), (4, 1)) assert_size_stride(primals_3, (1024,), (1,)) assert_size_stride(primals_4, (512, 1024), (1024, 1)) assert_size_stride(primals_5, (512,), (1,)) assert_size_stride(primals_6, (256, 512), (512, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (1, 256), (256, 1)) assert_size_stride(primals_9, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 1024 ), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 1024), (1024, 1), torch.bool) buf2 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(4096)](buf0, primals_3, buf1, buf2, 4096, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_3 buf3 = torch.ops.aten.native_dropout.default(buf2, 0.3, True) del buf2 buf4 = buf3[0] buf5 = buf3[1] del buf3 buf6 = empty_strided_cuda((4, 512), (512, 1), torch.float32) extern_kernels.mm(buf4, reinterpret_tensor(primals_4, (1024, 512), (1, 1024), 0), out=buf6) buf7 = empty_strided_cuda((4, 512), (512, 1), torch.bool) buf8 = empty_strided_cuda((4, 512), (512, 1), torch.float32) triton_poi_fused_leaky_relu_1[grid(2048)](buf6, primals_5, buf7, buf8, 2048, XBLOCK=256, num_warps=4, num_stages=1) del buf6 del primals_5 buf9 = torch.ops.aten.native_dropout.default(buf8, 0.3, True) del buf8 buf10 = buf9[0] buf11 = buf9[1] del buf9 buf12 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(buf10, reinterpret_tensor(primals_6, (512, 256), (1, 512), 0), out=buf12) buf13 = empty_strided_cuda((4, 256), (256, 1), torch.bool) buf14 = empty_strided_cuda((4, 256), (256, 1), torch.float32) triton_poi_fused_leaky_relu_2[grid(1024)](buf12, primals_7, buf13, buf14, 1024, XBLOCK=256, num_warps=4, num_stages=1) del buf12 del primals_7 buf15 = torch.ops.aten.native_dropout.default(buf14, 0.3, True) del buf14 buf16 = buf15[0] buf17 = buf15[1] del buf15 buf18 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf16, reinterpret_tensor(primals_8, (256, 1), (1, 256), 0), out=buf18) buf19 = buf18 del buf18 triton_poi_fused_sigmoid_3[grid(4)](buf19, primals_9, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_9 return (buf19, primals_1, buf1, buf4, buf5, buf7, buf10, buf11, buf13, buf16, buf17, buf19, primals_8, primals_6, primals_4) class DiscriminatorNew(nn.Module): def __init__(self, img_shape, hidden_dim=1024): super().__init__() in_dim = int(np.prod(img_shape)) self.fc1 = nn.Linear(in_dim, hidden_dim) self.fc2 = nn.Linear(self.fc1.out_features, self.fc1.out_features // 2) self.fc3 = nn.Linear(self.fc2.out_features, self.fc2.out_features // 2) self.fc4 = nn.Linear(self.fc3.out_features, 1) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_8 = 
self.fc4.weight primals_9 = self.fc4.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
oke-aditya/pytorch-lightning-bolts
Discriminator
false
7374
[ "Apache-2.0" ]
1
268df20bb442e7385b709b1488d37fd2767aba3c
https://github.com/oke-aditya/pytorch-lightning-bolts/tree/268df20bb442e7385b709b1488d37fd2767aba3c
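A smoke test for this entry (assumes CUDA and the classes above in scope). The compiled graph calls aten.native_dropout(..., 0.3, True) with train hard-wired to True, faithfully mirroring the source's F.dropout(x, 0.3), which ignores self.training; the output is therefore stochastic even after eval():

import torch

torch.manual_seed(0)
disc = DiscriminatorNew(img_shape=4).cuda()
img = torch.rand(4, 4, device='cuda')
p1 = disc(img)
p2 = disc(img)
assert p1.shape == (4, 1)       # sigmoid outputs in (0, 1)
assert not torch.equal(p1, p2)  # dropout stays active across calls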
ResBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/qg/cqg3e6qbl457uja37r5t7mwrhshp5howpejrpt2vy5kg7sjg7kkn.py # Topologically Sorted Source Nodes: [conv1d_1, add, out_tanh, conv1d_3, add_1, out_gate, mul], Original ATen: [aten.convolution, aten.add, aten.tanh, aten.sigmoid, aten.mul] # Source node to ATen node mapping: # add => add # add_1 => add_1 # conv1d_1 => convolution_1 # conv1d_3 => convolution_3 # mul => mul # out_gate => sigmoid # out_tanh => tanh # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_5, %primals_3, %primals_4, [1], [0], [1], False, [0], 1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_3, %convolution_1), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {}) # %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_5, %primals_7, %primals_8, [1], [0], [1], False, [0], 1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_6, %convolution_3), kwargs = {}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_1,), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, %sigmoid), kwargs = {}) triton_poi_fused_add_convolution_mul_sigmoid_tanh_0 = async_compile.triton('triton_poi_fused_add_convolution_mul_sigmoid_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 
'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mul_sigmoid_tanh_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_mul_sigmoid_tanh_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x4 = (xindex // 4) x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_ptr0 + (x0 + (7*x4)), xmask) tmp1 = tl.load(in_out_ptr0 + (x3), xmask) tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (x0 + (7*x4)), xmask) tmp7 = tl.load(in_out_ptr1 + (x3), xmask) tmp8 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp5 = libdevice.tanh(tmp4) tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp11 = tl.sigmoid(tmp10) tmp12 = tmp5 * tmp11 tl.store(in_out_ptr0 + (x3), tmp5, xmask) tl.store(in_out_ptr1 + (x3), tmp11, xmask) tl.store(out_ptr0 + (x3), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/us/custepuaxcgmeci6okpmedesxdvzu45qqfj6xlln2yvyowwq2gle.py # Topologically Sorted Source Nodes: [conv1d_4, output], Original ATen: [aten.convolution, aten.add] # Source node to ATen node mapping: # conv1d_4 => convolution_4 # output => add_2 # Graph fragment: # %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mul, %primals_9, %primals_10, [1], [0], [1], False, [0], 1), kwargs = {}) # %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_4, %primals_2), kwargs = {}) triton_poi_fused_add_convolution_1 = async_compile.triton('triton_poi_fused_add_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 
'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/lf/clf7hs52i4bd5d3e73uio27ntyjfqmszkbsw6dta3r6rzgeftva3.py # Topologically Sorted Source Nodes: [skip], Original ATen: [aten.convolution] # Source node to ATen node mapping: # skip => convolution_5 # Graph fragment: # %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_2, %primals_11, %primals_12, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) 
assert_size_stride(primals_3, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_8, (4, ), (1, )) assert_size_stride(primals_9, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_10, (4, ), (1, )) assert_size_stride(primals_11, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_12, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,), padding=(3,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 7), (28, 7, 1)) # Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(primals_5, primals_3, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf3 = extern_kernels.convolution(primals_2, primals_6, stride=(1,), padding=(3,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 7), (28, 7, 1)) # Topologically Sorted Source Nodes: [conv1d_3], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(primals_5, primals_7, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4), (16, 4, 1)) buf2 = buf1; del buf1 # reuse buf5 = buf4; del buf4 # reuse buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [conv1d_1, add, out_tanh, conv1d_3, add_1, out_gate, mul], Original ATen: [aten.convolution, aten.add, aten.tanh, aten.sigmoid, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_convolution_mul_sigmoid_tanh_0.run(buf2, buf5, buf0, primals_4, buf3, primals_8, buf6, 64, grid=grid(64), stream=stream0) del buf0 del buf3 del primals_4 del primals_8 # Topologically Sorted Source Nodes: [conv1d_4], Original ATen: [aten.convolution] buf7 = extern_kernels.convolution(buf6, primals_9, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 4), (16, 4, 1)) buf8 = buf7; del buf7 # reuse # Topologically Sorted Source Nodes: [conv1d_4, output], Original ATen: [aten.convolution, aten.add] triton_poi_fused_add_convolution_1.run(buf8, primals_10, primals_2, 64, grid=grid(64), stream=stream0) del primals_10 # Topologically Sorted Source Nodes: [skip], Original ATen: [aten.convolution] buf9 = extern_kernels.convolution(buf8, primals_11, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf9, (4, 4, 4), (16, 4, 1)) buf10 = buf9; del buf9 # reuse # Topologically Sorted Source Nodes: [skip], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf10, primals_12, 64, grid=grid(64), stream=stream0) del primals_12 return (buf8, buf10, primals_1, primals_2, primals_3, primals_5, primals_6, primals_7, primals_9, primals_11, buf2, buf5, buf6, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 
4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class CausalConv1d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, dilation=1, bias=False): super(CausalConv1d, self).__init__() self.padding = padding = (kernel_size - 1) * dilation self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, padding=padding, dilation=dilation, bias=bias) def forward(self, x): x = self.conv(x) if self.padding != 0: x = x[:, :, :-self.padding] return x class ResBlock(nn.Module): def __init__(self, dilation, aux_channels, n_channels, skip_channels, kernel_size, fast_inference=False): super(ResBlock, self).__init__() conv_dilation = 1 if fast_inference else dilation self.filter = CausalConv1d(n_channels, n_channels, kernel_size= kernel_size, dilation=conv_dilation) self.gate = CausalConv1d(n_channels, n_channels, kernel_size= kernel_size, dilation=conv_dilation) self.aux_filter = nn.Conv1d(aux_channels, n_channels, kernel_size=1) self.aux_gate = nn.Conv1d(aux_channels, n_channels, kernel_size=1) if fast_inference: self.queue = None self.buffer_size = conv_dilation * 2 * (kernel_size - 1) self.fast_inference = fast_inference self.permute = nn.Conv1d(n_channels, n_channels, kernel_size=1) self.skip = nn.Conv1d(n_channels, skip_channels, kernel_size=1) def forward(self, x, h): if self.fast_inference: pass else: out_tanh = torch.tanh(self.filter(x) + self.aux_filter(h)) out_gate = torch.sigmoid(self.gate(x) + self.aux_gate(h)) output = self.permute(out_tanh * out_gate) + x skip = self.skip(output) return output, skip def clear(self): self.queue = None def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dilation': 1, 'aux_channels': 4, 'n_channels': 4, 'skip_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_convolution_mul_sigmoid_tanh_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x4 = xindex // 4 x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + (x0 + 7 * x4), xmask) tmp1 = tl.load(in_out_ptr0 + x3, xmask) tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (x0 + 7 * x4), xmask) tmp7 = tl.load(in_out_ptr1 + x3, xmask) tmp8 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp5 = libdevice.tanh(tmp4) tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp11 = tl.sigmoid(tmp10) tmp12 = tmp5 * tmp11 tl.store(in_out_ptr0 + x3, tmp5, xmask) tl.store(in_out_ptr1 + x3, tmp11, xmask) tl.store(out_ptr0 + x3, tmp12, xmask) @triton.jit def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,), padding=(3,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 7), (28, 7, 1)) buf1 = extern_kernels.convolution(primals_5, primals_3, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) buf3 = 
extern_kernels.convolution(primals_2, primals_6, stride=(1,), padding=(3,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 7), (28, 7, 1)) buf4 = extern_kernels.convolution(primals_5, primals_7, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4), (16, 4, 1)) buf2 = buf1 del buf1 buf5 = buf4 del buf4 buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_convolution_mul_sigmoid_tanh_0[grid(64)](buf2, buf5, buf0, primals_4, buf3, primals_8, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf0 del buf3 del primals_4 del primals_8 buf7 = extern_kernels.convolution(buf6, primals_9, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 4), (16, 4, 1)) buf8 = buf7 del buf7 triton_poi_fused_add_convolution_1[grid(64)](buf8, primals_10, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_10 buf9 = extern_kernels.convolution(buf8, primals_11, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf9, (4, 4, 4), (16, 4, 1)) buf10 = buf9 del buf9 triton_poi_fused_convolution_2[grid(64)](buf10, primals_12, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_12 return (buf8, buf10, primals_1, primals_2, primals_3, primals_5, primals_6, primals_7, primals_9, primals_11, buf2, buf5, buf6, buf8) class CausalConv1d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, dilation=1, bias=False): super(CausalConv1d, self).__init__() self.padding = padding = (kernel_size - 1) * dilation self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, padding=padding, dilation=dilation, bias=bias) def forward(self, x): x = self.conv(x) if self.padding != 0: x = x[:, :, :-self.padding] return x class ResBlockNew(nn.Module): def __init__(self, dilation, aux_channels, n_channels, skip_channels, kernel_size, fast_inference=False): super(ResBlockNew, self).__init__() conv_dilation = 1 if fast_inference else dilation self.filter = CausalConv1d(n_channels, n_channels, kernel_size= kernel_size, dilation=conv_dilation) self.gate = CausalConv1d(n_channels, n_channels, kernel_size= kernel_size, dilation=conv_dilation) self.aux_filter = nn.Conv1d(aux_channels, n_channels, kernel_size=1) self.aux_gate = nn.Conv1d(aux_channels, n_channels, kernel_size=1) if fast_inference: self.queue = None self.buffer_size = conv_dilation * 2 * (kernel_size - 1) self.fast_inference = fast_inference self.permute = nn.Conv1d(n_channels, n_channels, kernel_size=1) self.skip = nn.Conv1d(n_channels, skip_channels, kernel_size=1) def clear(self): self.queue = None def forward(self, input_0, input_1): primals_1 = self.filter.conv.weight primals_2 = self.gate.conv.weight primals_3 = self.aux_filter.weight primals_4 = self.aux_filter.bias primals_7 = self.aux_gate.weight primals_8 = self.aux_gate.bias primals_9 = self.permute.weight primals_10 = self.permute.bias primals_11 = self.skip.weight primals_12 = self.skip.bias primals_5 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0], output[1]
oleges1/TTS
ResBlock
false
7,375
[ "MIT" ]
1
19b389714078729fae29faf9c23112bdbe4c8dec
https://github.com/oleges1/TTS/tree/19b389714078729fae29faf9c23112bdbe4c8dec
RepeatModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/px/cpximxrunnn7xlivxg7ckdm4rpo2iaqrxs5ifae2ywvsmxn5yuti.py # Topologically Sorted Source Nodes: [tensor, repeat], Original ATen: [aten.add, aten.repeat] # Source node to ATen node mapping: # repeat => repeat # tensor => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg0_1), kwargs = {}) # %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%add, [4]), kwargs = {}) triton_poi_fused_add_repeat_0 = async_compile.triton('triton_poi_fused_add_repeat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_repeat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask) tmp1 = tmp0 + tmp0 
tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [tensor, repeat], Original ATen: [aten.add, aten.repeat] stream0 = get_raw_stream(0) triton_poi_fused_add_repeat_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.jit import torch.onnx import torch.nn class RepeatModule(torch.nn.Module): def __init__(self, repeats): super(RepeatModule, self).__init__() self.repeats = repeats def forward(self, tensor): tensor = tensor + tensor return tensor.repeat(self.repeats) def get_inputs(): return [torch.rand([4])] def get_init_inputs(): return [[], {'repeats': 4}]
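Illustrative check (my addition, not from the record): with repeats=4 and a length-4 input, (tensor + tensor).repeat(4) tiles the doubled vector four times into a length-16 output, which matches the xnumel = 16 and the x0 % 4 indexing in the fused Triton kernel.

import torch

m = RepeatModule(repeats=4)
t = torch.arange(4.0)
out = m(t)
assert out.shape == (16,)
assert torch.equal(out, (t + t).repeat(4))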
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_repeat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0 % 4, xmask) tmp1 = tmp0 + tmp0 tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16,), (1,), torch.float32) get_raw_stream(0) triton_poi_fused_add_repeat_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return buf0, class RepeatModuleNew(torch.nn.Module): def __init__(self, repeats): super(RepeatModuleNew, self).__init__() self.repeats = repeats def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
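Equivalence sketch between the eager module and the Inductor-generated path (my addition; hypothetical usage that requires a CUDA device, since call() asserts a (4,) float32 input and pins the kernel to cuda:0):

import torch

if torch.cuda.is_available():
    t = torch.rand(4, device='cuda')  # shape/stride asserted inside call()
    eager = RepeatModule(4)(t)
    compiled = RepeatModuleNew(4)(t)
    assert torch.allclose(eager, compiled)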
opti-mix/glow
RepeatModule
false
7,376
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
Grounding
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/aw/cawte7o5l2qedgknpzshnd4zn5jxxhdbpkkqxowzkptjrvnqufl4.py # Topologically Sorted Source Nodes: [logits_1, logits_2, squeeze], Original ATen: [aten.div, aten.add, aten.squeeze] # Source node to ATen node mapping: # logits_1 => div # logits_2 => add # squeeze => squeeze # Graph fragment: # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_8, 1.4142135623730951), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_7), kwargs = {}) # %squeeze : [num_users=1] = call_function[target=torch.ops.aten.squeeze.default](args = (%add,), kwargs = {}) triton_poi_fused_add_div_squeeze_0 = async_compile.triton('triton_poi_fused_add_div_squeeze_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_squeeze_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_squeeze_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : 
tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = (xindex // 64) x3 = xindex % 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x3), xmask, eviction_policy='evict_last') tmp1 = 0.7071067811865475 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + (x4), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (2, 4), (4, 1)) assert_size_stride(primals_2, (2, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (2, 4), (4, 1)) assert_size_stride(primals_5, (2, ), (1, )) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [Q], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [K], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 2), (8, 2, 1), 0), reinterpret_tensor(buf1, (4, 2, 4), (8, 1, 2), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [logits_1, logits_2, squeeze], Original ATen: [aten.div, aten.add, aten.squeeze] stream0 = get_raw_stream(0) triton_poi_fused_add_div_squeeze_0.run(buf2, primals_7, buf3, 256, grid=grid(256), stream=stream0) del buf2 del primals_7 return (buf3, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 2, 4), (8, 1, 2), 0), reinterpret_tensor(buf1, (4, 4, 2), (8, 2, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config import math import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch as th from torchvision.ops.boxes import * from torchvision.transforms.functional import * class Grounding(nn.Module): def __init__(self, cfgT, cfgI, heads=1): super(Grounding, self).__init__() projection = cfgI.hidden_size // 2 self.num_attention_heads = heads self.attention_head_size = int(projection // self.num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.Q = nn.Linear(cfgT.hidden_size, self.all_head_size) self.K = nn.Linear(cfgI.hidden_size, self.all_head_size) self.cfgT = cfgT self.cfgI = cfgI def transpose(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, encT, encI, mask): Q = self.Q(encT) K = self.K(encI) Q = self.transpose(Q) K = self.transpose(K) logits = th.matmul(Q, K.transpose(-1, -2)) logits = logits / math.sqrt(self.attention_head_size) logits = logits + mask return logits.squeeze() def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]) ] def get_init_inputs(): return [[], {'cfgT': _mock_config(hidden_size=4), 'cfgI': _mock_config( hidden_size=4)}]
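Shape walk-through for the Grounding head (my addition; assumes the _mock_config helper behaves as in get_init_inputs): hidden_size=4 gives projection=2, so Q and K are (B, T, 2), transposed to (B, 1, T, 2); QK^T / sqrt(2) is (B, 1, T, T), and broadcasting the (B, T, T) mask yields the (4, 4, 4, 4) logits returned as buf3 in the compiled code.

import torch

cfg = _mock_config(hidden_size=4)
g = Grounding(cfg, cfg, heads=1)
encT = torch.rand(4, 4, 4)
encI = torch.rand(4, 4, 4)
mask = torch.rand(4, 4, 4)
logits = g(encT, encI, mask)
assert logits.shape == (4, 4, 4, 4)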
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed from torchvision.ops.boxes import * from torchvision.transforms.functional import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_squeeze_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex // 64 x3 = xindex % 64 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last') tmp1 = 0.7071067811865475 tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + x4, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (2, 4), (4, 1)) assert_size_stride(primals_2, (2,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (2, 4), (4, 1)) assert_size_stride(primals_5, (2,), (1,)) assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((16, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 2), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 2), (8, 2, 1), 0 ), reinterpret_tensor(buf1, (4, 2, 4), (8, 1, 2), 0), out=buf2) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_squeeze_0[grid(256)](buf2, primals_7, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del primals_7 return buf3, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf0, (4, 2, 4), (8, 1, 2), 0 ), reinterpret_tensor(buf1, (4, 4, 2), (8, 2, 1), 0) class GroundingNew(nn.Module): def __init__(self, cfgT, cfgI, heads=1): super(GroundingNew, self).__init__() projection = cfgI.hidden_size // 2 self.num_attention_heads = heads self.attention_head_size = int(projection // self.num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.Q = nn.Linear(cfgT.hidden_size, self.all_head_size) self.K = nn.Linear(cfgI.hidden_size, self.all_head_size) self.cfgT = cfgT self.cfgI = cfgI def transpose(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. 
attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, input_0, input_1, input_2): primals_1 = self.Q.weight primals_2 = self.Q.bias primals_4 = self.K.weight primals_5 = self.K.bias primals_3 = input_0 primals_6 = input_1 primals_7 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
necla-ml/ML-Vision
Grounding
false
7,377
[ "BSD-3-Clause" ]
1
66229b29fc0f67c75dbe6304cdb8c5e93fe0bacf
https://github.com/necla-ml/ML-Vision/tree/66229b29fc0f67c75dbe6304cdb8c5e93fe0bacf
SimpleASinModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/po/cpoegfcicyuorm7m4kiifuei33mjylvd7jlimgcjbcyazqqbkuwv.py # Topologically Sorted Source Nodes: [add, asin], Original ATen: [aten.add, aten.asin] # Source node to ATen node mapping: # add => add # asin => asin # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg0_1), kwargs = {}) # %asin : [num_users=1] = call_function[target=torch.ops.aten.asin.default](args = (%add,), kwargs = {}) triton_poi_fused_add_asin_0 = async_compile.triton('triton_poi_fused_add_asin_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_asin_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_asin_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0 + tmp0 tmp2 = libdevice.asin(tmp1) 
tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, asin], Original ATen: [aten.add, aten.asin] stream0 = get_raw_stream(0) triton_poi_fused_add_asin_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.jit import torch.onnx import torch.nn class SimpleASinModule(torch.nn.Module): def __init__(self): super(SimpleASinModule, self).__init__() def forward(self, a): return torch.asin(a + a) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
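Domain note with a tiny check (my addition): asin is only defined on [-1, 1], and get_inputs() doubles values drawn from [0, 1), so any element above 0.5 produces NaN in both the eager module and the fused kernel.

import torch

m = SimpleASinModule()
out = m(torch.tensor([0.2, 0.6]))
assert torch.isclose(out[0], torch.asin(torch.tensor(0.4)))
assert torch.isnan(out[1])  # asin(1.2) is outside the domain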
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_asin_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 + tmp0 tmp2 = libdevice.asin(tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_asin_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del arg0_1 return buf0, class SimpleASinModuleNew(torch.nn.Module): def __init__(self): super(SimpleASinModuleNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
opti-mix/glow
SimpleASinModule
false
7,378
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
SimpleAvgPool1dModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/xk/cxkmworteiawp27uz6szuij56323prklcytmzy4oalfulieyhlxw.py # Topologically Sorted Source Nodes: [avg_pool1d], Original ATen: [aten.avg_pool2d] # Source node to ATen node mapping: # avg_pool1d => avg_pool2d # Graph fragment: # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%unsqueeze, [1, 4], [1, 4]), kwargs = {}) triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 
(4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [avg_pool1d], Original ATen: [aten.avg_pool2d] stream0 = get_raw_stream(0) triton_poi_fused_avg_pool2d_0.run(arg0_1, buf0, 4, grid=grid(4), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 1), (1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.jit import torch.onnx import torch.nn class SimpleAvgPool1dModule(torch.nn.Module): def __init__(self, kernel_size, stride=None, padding=0): super(SimpleAvgPool1dModule, self).__init__() self.kernel_size = kernel_size self.padding = padding self.stride = stride def forward(self, inputs): return F.avg_pool1d(inputs, self.kernel_size, padding=self.padding, stride=self.stride) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'kernel_size': 4}]
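Quick equivalence check (my addition, not from the record): on the (4, 4) input from get_inputs(), F.avg_pool1d treats the tensor as unbatched (C, L), so kernel_size=4 averages each row down to a single value -- the same four loads summed and scaled by 0.25 in the Triton kernel.

import torch

m = SimpleAvgPool1dModule(kernel_size=4)
x = torch.rand(4, 4)
out = m(x)
assert out.shape == (4, 1)
assert torch.allclose(out.squeeze(1), x.mean(dim=1))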
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(4)](arg0_1, buf0, 4, XBLOCK=4, num_warps=1, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 1), (1, 1), 0), class SimpleAvgPool1dModuleNew(torch.nn.Module): def __init__(self, kernel_size, stride=None, padding=0): super(SimpleAvgPool1dModuleNew, self).__init__() self.kernel_size = kernel_size self.padding = padding self.stride = stride def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
opti-mix/glow
SimpleAvgPool1dModule
false
7,379
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
MixtureDensityHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ag/cagauppfc5dyrage7yy5leg6gsn632zg5ott3n34jtilpqfqtgis.py # Topologically Sorted Source Nodes: [elu, add, sigma_1], Original ATen: [aten.elu, aten.add] # Source node to ATen node mapping: # add => add # elu => expm1, gt, mul, mul_2, where # sigma_1 => add_1 # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_3, 0), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 1.0), kwargs = {}) # %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, 1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 1e-15), kwargs = {}) triton_poi_fused_add_elu_0 = async_compile.triton('triton_poi_fused_add_elu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_elu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 
'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tmp8 = tmp7 + tmp3 tmp9 = 1e-15 tmp10 = tmp8 + tmp9 tl.store(out_ptr0 + (x0), tmp10, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [pi], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [sigma], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [elu, add, sigma_1], Original ATen: [aten.elu, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_elu_0.run(buf1, buf2, 256, grid=grid(256), stream=stream0) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_6 del primals_7 return (reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2, reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, 
primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.distributions import Categorical

# Constants used by gaussian_probability below; they were referenced but
# missing from the original extract.
ONEOVERSQRT2PI = 1.0 / math.sqrt(2 * math.pi)
LOG2PI = math.log(2 * math.pi)


class MixtureDensityHead(nn.Module):

    def __init__(self, config: 'DictConfig', **kwargs):
        self.hparams = config
        super().__init__()
        self._build_network()

    def _build_network(self):
        self.pi = nn.Linear(self.hparams.input_dim, self.hparams.num_gaussian)
        nn.init.normal_(self.pi.weight)
        self.sigma = nn.Linear(self.hparams.input_dim, self.hparams.
            num_gaussian, bias=self.hparams.sigma_bias_flag)
        self.mu = nn.Linear(self.hparams.input_dim, self.hparams.num_gaussian)
        nn.init.normal_(self.mu.weight)
        if self.hparams.mu_bias_init is not None:
            for i, bias in enumerate(self.hparams.mu_bias_init):
                nn.init.constant_(self.mu.bias[i], bias)

    def forward(self, x):
        pi = self.pi(x)
        sigma = self.sigma(x)
        sigma = nn.ELU()(sigma) + 1 + 1e-15
        mu = self.mu(x)
        return pi, sigma, mu

    def gaussian_probability(self, sigma, mu, target, log=False):
        """Returns the probability of `target` given MoG parameters `sigma` and `mu`.

        Arguments:
            sigma (BxGxO): The standard deviation of the Gaussians. B is the
                batch size, G is the number of Gaussians, and O is the number
                of dimensions per Gaussian.
            mu (BxGxO): The means of the Gaussians. B is the batch size, G is
                the number of Gaussians, and O is the number of dimensions
                per Gaussian.
            target (BxI): A batch of target. B is the batch size and I is the
                number of input dimensions.

        Returns:
            probabilities (BxG): The probability of each point under the
                mixture component with the corresponding sigma/mu index.
        """
        target = target.expand_as(sigma)
        if log:
            ret = -torch.log(sigma) - 0.5 * LOG2PI - 0.5 * torch.pow((
                target - mu) / sigma, 2)
        else:
            ret = ONEOVERSQRT2PI / sigma * torch.exp(-0.5 * ((target - mu) /
                sigma) ** 2)
        return ret

    def log_prob(self, pi, sigma, mu, y):
        log_component_prob = self.gaussian_probability(sigma, mu, y, log=True)
        log_mix_prob = torch.log(nn.functional.gumbel_softmax(pi, tau=self.
            hparams.softmax_temperature, dim=-1) + 1e-15)
        return torch.logsumexp(log_component_prob + log_mix_prob, dim=-1)

    def sample(self, pi, sigma, mu):
        """Draw samples from a MoG."""
        categorical = Categorical(pi)
        pis = categorical.sample().unsqueeze(1)
        sample = Variable(sigma.data.new(sigma.size(0), 1).normal_())
        sample = sample * sigma.gather(1, pis) + mu.gather(1, pis)
        return sample

    def generate_samples(self, pi, sigma, mu, n_samples=None):
        if n_samples is None:
            n_samples = self.hparams.n_samples
        samples = []
        softmax_pi = nn.functional.gumbel_softmax(pi, tau=self.hparams.
            softmax_temperature, dim=-1)
        assert (softmax_pi < 0).sum().item(
            ) == 0, 'pi parameter should not have negative'
        for _ in range(n_samples):
            samples.append(self.sample(softmax_pi, sigma, mu))
        samples = torch.cat(samples, dim=1)
        return samples

    def generate_point_predictions(self, pi, sigma, mu, n_samples=None):
        samples = self.generate_samples(pi, sigma, mu, n_samples)
        if self.hparams.central_tendency == 'mean':
            y_hat = torch.mean(samples, dim=-1)
        elif self.hparams.central_tendency == 'median':
            y_hat = torch.median(samples, dim=-1).values
        return y_hat.unsqueeze(1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'config': _mock_config(input_dim=4, num_gaussian=4,
        sigma_bias_flag=4, mu_bias_init=[4, 4])}]
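Minimal forward sketch (my addition; assumes the _mock_config helper used by get_init_inputs, here with a boolean sigma_bias_flag): each of pi, sigma, mu maps the last dimension to num_gaussian, and sigma = ELU(.) + 1 + 1e-15 keeps every standard deviation strictly positive.

import torch

head = MixtureDensityHead(_mock_config(input_dim=4, num_gaussian=4,
    sigma_bias_flag=True, mu_bias_init=[4, 4]))
pi, sigma, mu = head(torch.rand(4, 4, 4, 4))
assert pi.shape == sigma.shape == mu.shape == (4, 4, 4, 4)
assert (sigma > 0).all()  # ELU output > -1, so sigma > 1e-15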
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
from torch.autograd import Variable
from torch.distributions import Categorical
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Constants used by gaussian_probability below; they were referenced but
# missing from the original extract.
ONEOVERSQRT2PI = 1.0 / math.sqrt(2 * math.pi)
LOG2PI = math.log(2 * math.pi)


@triton.jit
def triton_poi_fused_add_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
    ):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 1.0
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.expm1(tmp4)
    tmp6 = tmp5 * tmp3
    tmp7 = tl.where(tmp2, tmp4, tmp6)
    tmp8 = tmp7 + tmp3
    tmp9 = 1e-15
    tmp10 = tmp8 + tmp9
    tl.store(out_ptr0 + x0, tmp10, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_elu_0[grid(256)](buf1, buf2, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf3)
        del primals_6
        del primals_7
    return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), buf2, reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1


class MixtureDensityHeadNew(nn.Module):

    def __init__(self, config: 'DictConfig', **kwargs):
        self.hparams = config
        super().__init__()
        self._build_network()

    def _build_network(self):
        self.pi = nn.Linear(self.hparams.input_dim, self.hparams.num_gaussian)
        nn.init.normal_(self.pi.weight)
        self.sigma = nn.Linear(self.hparams.input_dim, self.hparams.
            num_gaussian, bias=self.hparams.sigma_bias_flag)
        self.mu = nn.Linear(self.hparams.input_dim, self.hparams.num_gaussian)
        nn.init.normal_(self.mu.weight)
        if self.hparams.mu_bias_init is not None:
            for i, bias in enumerate(self.hparams.mu_bias_init):
                nn.init.constant_(self.mu.bias[i], bias)

    def gaussian_probability(self, sigma, mu, target, log=False):
        """Returns the probability of `target` given MoG parameters `sigma` and `mu`.

        Arguments:
            sigma (BxGxO): The standard deviation of the Gaussians. B is the
                batch size, G is the number of Gaussians, and O is the number
                of dimensions per Gaussian.
            mu (BxGxO): The means of the Gaussians. B is the batch size, G is
                the number of Gaussians, and O is the number of dimensions
                per Gaussian.
            target (BxI): A batch of target. B is the batch size and I is the
                number of input dimensions.

        Returns:
            probabilities (BxG): The probability of each point under the
                mixture component with the corresponding sigma/mu index.
        """
        target = target.expand_as(sigma)
        if log:
            ret = -torch.log(sigma) - 0.5 * LOG2PI - 0.5 * torch.pow((
                target - mu) / sigma, 2)
        else:
            ret = ONEOVERSQRT2PI / sigma * torch.exp(-0.5 * ((target - mu) /
                sigma) ** 2)
        return ret

    def log_prob(self, pi, sigma, mu, y):
        log_component_prob = self.gaussian_probability(sigma, mu, y, log=True)
        log_mix_prob = torch.log(nn.functional.gumbel_softmax(pi, tau=self.
            hparams.softmax_temperature, dim=-1) + 1e-15)
        return torch.logsumexp(log_component_prob + log_mix_prob, dim=-1)

    def sample(self, pi, sigma, mu):
        """Draw samples from a MoG."""
        categorical = Categorical(pi)
        pis = categorical.sample().unsqueeze(1)
        sample = Variable(sigma.data.new(sigma.size(0), 1).normal_())
        sample = sample * sigma.gather(1, pis) + mu.gather(1, pis)
        return sample

    def generate_samples(self, pi, sigma, mu, n_samples=None):
        if n_samples is None:
            n_samples = self.hparams.n_samples
        samples = []
        softmax_pi = nn.functional.gumbel_softmax(pi, tau=self.hparams.
            softmax_temperature, dim=-1)
        assert (softmax_pi < 0).sum().item(
            ) == 0, 'pi parameter should not have negative'
        for _ in range(n_samples):
            samples.append(self.sample(softmax_pi, sigma, mu))
        samples = torch.cat(samples, dim=1)
        return samples

    def generate_point_predictions(self, pi, sigma, mu, n_samples=None):
        samples = self.generate_samples(pi, sigma, mu, n_samples)
        if self.hparams.central_tendency == 'mean':
            y_hat = torch.mean(samples, dim=-1)
        elif self.hparams.central_tendency == 'median':
            y_hat = torch.median(samples, dim=-1).values
        return y_hat.unsqueeze(1)

    def forward(self, input_0):
        primals_1 = self.pi.weight
        primals_2 = self.pi.bias
        primals_4 = self.sigma.weight
        primals_5 = self.sigma.bias
        primals_6 = self.mu.weight
        primals_7 = self.mu.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0], output[1], output[2]
Actis92/pytorch_tabular
MixtureDensityHead
false
7,380
[ "MIT" ]
1
78dabf5e7b97d8ff24db4bc83d9d0a2273941bbe
https://github.com/Actis92/pytorch_tabular/tree/78dabf5e7b97d8ff24db4bc83d9d0a2273941bbe
SimpleACosModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ku/ckuxvoxhmi6ercpm3zqehlfc3vctrl7fwhgkl4t7idy4tmzkn672.py # Topologically Sorted Source Nodes: [add, acos], Original ATen: [aten.add, aten.acos] # Source node to ATen node mapping: # acos => acos # add => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg0_1), kwargs = {}) # %acos : [num_users=1] = call_function[target=torch.ops.aten.acos.default](args = (%add,), kwargs = {}) triton_poi_fused_acos_add_0 = async_compile.triton('triton_poi_fused_acos_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_acos_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_acos_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0 + tmp0 tmp2 = libdevice.acos(tmp1) 
tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, acos], Original ATen: [aten.add, aten.acos] stream0 = get_raw_stream(0) triton_poi_fused_acos_add_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.jit import torch.onnx import torch.nn class SimpleACosModule(torch.nn.Module): def __init__(self): super(SimpleACosModule, self).__init__() def forward(self, a): return torch.acos(a + a) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_acos_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 + tmp0 tmp2 = libdevice.acos(tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_acos_add_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del arg0_1 return buf0, class SimpleACosModuleNew(torch.nn.Module): def __init__(self): super(SimpleACosModuleNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
opti-mix/glow
SimpleACosModule
false
7381
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
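A quick way to sanity-check a record like this is to run the eager module through torch.compile, which drives the same Inductor pipeline that produced the listing above. A minimal sketch, assuming a CUDA device and PyTorch 2.x; the input is scaled so a + a stays inside acos's [-1, 1] domain and no NaNs enter the comparison:

import torch

class SimpleACosModule(torch.nn.Module):
    def forward(self, a):
        return torch.acos(a + a)

if torch.cuda.is_available():
    m = SimpleACosModule().cuda()
    # rand() is in [0, 1); scaling by 0.5 keeps a + a within acos's domain
    x = torch.rand(4, 4, 4, 4, device='cuda') * 0.5
    torch.testing.assert_close(torch.compile(m)(x), m(x))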
SimpleATanModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ne/cnela3lyozhtiqn5hqe56wagkn5vzngrj3onlkngmltrgqlvhlwc.py # Topologically Sorted Source Nodes: [add, atan], Original ATen: [aten.add, aten.atan] # Source node to ATen node mapping: # add => add # atan => atan # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg0_1), kwargs = {}) # %atan : [num_users=1] = call_function[target=torch.ops.aten.atan.default](args = (%add,), kwargs = {}) triton_poi_fused_add_atan_0 = async_compile.triton('triton_poi_fused_add_atan_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_atan_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_atan_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0 + tmp0 tmp2 = libdevice.atan(tmp1) 
tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, atan], Original ATen: [aten.add, aten.atan] stream0 = get_raw_stream(0) triton_poi_fused_add_atan_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.jit import torch.onnx import torch.nn class SimpleATanModule(torch.nn.Module): def __init__(self): super(SimpleATanModule, self).__init__() def forward(self, a): return torch.atan(a + a) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_atan_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 + tmp0 tmp2 = libdevice.atan(tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_atan_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 return buf0, class SimpleATanModuleNew(torch.nn.Module): def __init__(self): super(SimpleATanModuleNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
opti-mix/glow
SimpleATanModule
false
7382
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
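The fused kernel above is mostly autotuning metadata around a four-line body. A hand-written equivalent, as a sketch assuming CUDA and a Triton build matching the installed PyTorch (it reuses the same libdevice helper the record's rewritten code imports):

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_helpers import libdevice  # same helper the record uses

@triton.jit
def atan_add_kernel(in_ptr, out_ptr, n, BLOCK: tl.constexpr):
    # one program handles BLOCK contiguous elements; the mask guards the tail
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < n
    x = tl.load(in_ptr + offs, mask=mask)
    tl.store(out_ptr + offs, libdevice.atan(x + x), mask=mask)

if torch.cuda.is_available():
    a = torch.rand(4, 4, 4, 4, device='cuda')
    out = torch.empty_like(a)
    n = a.numel()
    atan_add_kernel[(triton.cdiv(n, 256),)](a, out, n, BLOCK=256)
    torch.testing.assert_close(out, torch.atan(a + a))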
SimpleAddMmModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ph/cphyvqksaznjc5f5gstivhj5vszkuncctuzvaegazln3taw555sz.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg0_1), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0 + tmp0 tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, 
(4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, addmm], Original ATen: [aten.add, aten.addmm] extern_kernels.addmm(buf0, arg1_1, arg2_1, alpha=1, beta=1, out=buf1) del arg1_1 del arg2_1 del buf0 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.jit import torch.onnx import torch.nn class SimpleAddMmModule(torch.nn.Module): def __init__(self, alpha=1, beta=1): super(SimpleAddMmModule, self).__init__() self.alpha = alpha self.beta = beta def forward(self, a, b, c): return (a + a).addmm(b, c) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 + tmp0 tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(buf0, arg1_1, arg2_1, alpha=1, beta=1, out=buf1) del arg1_1 del arg2_1 del buf0 return buf1, class SimpleAddMmModuleNew(torch.nn.Module): def __init__(self, alpha=1, beta=1): super(SimpleAddMmModuleNew, self).__init__() self.alpha = alpha self.beta = beta def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
opti-mix/glow
SimpleAddMmModule
false
7383
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
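This record shows Inductor's split strategy: the elementwise a + a becomes a Triton kernel, while the matmul stays on cuBLAS via extern_kernels.addmm. What addmm computes, as a small CPU-only sketch (note the module stores alpha and beta but its forward relies on addmm's defaults of 1):

import torch

a, b, c = torch.rand(4, 4), torch.rand(4, 4), torch.rand(4, 4)
# addmm(input, mat1, mat2, *, beta=1, alpha=1) == beta * input + alpha * (mat1 @ mat2)
ref = (a + a) + b @ c
torch.testing.assert_close((a + a).addmm(b, c), ref)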
SimpleAbsModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/f4/cf4pkmxbqyp3rqsgl3bdbzsxemrohluent2vyggztgix67lsa757.py # Topologically Sorted Source Nodes: [add, abs_1], Original ATen: [aten.add, aten.abs] # Source node to ATen node mapping: # abs_1 => abs_1 # add => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg0_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%add,), kwargs = {}) triton_poi_fused_abs_add_0 = async_compile.triton('triton_poi_fused_abs_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_abs_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0 + tmp0 tmp2 = tl_math.abs(tmp1) 
tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, abs_1], Original ATen: [aten.add, aten.abs] stream0 = get_raw_stream(0) triton_poi_fused_abs_add_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.jit import torch.onnx import torch.nn class SimpleAbsModule(torch.nn.Module): def __init__(self): super(SimpleAbsModule, self).__init__() def forward(self, a): return torch.abs(a + a) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_abs_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 + tmp0 tmp2 = tl_math.abs(tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_abs_add_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class SimpleAbsModuleNew(torch.nn.Module): def __init__(self): super(SimpleAbsModuleNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
opti-mix/glow
SimpleAbsModule
false
7384
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
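Listings like these can be reproduced without digging through the Inductor cache: recent PyTorch 2.x releases print the generated wrapper and Triton kernels when output-code logging is enabled. A sketch, assuming a CUDA device:

# Run as: TORCH_LOGS="output_code" python this_script.py
# Inductor then logs the generated wrapper plus kernels such as
# triton_poi_fused_abs_add_0 above.
import torch

def f(a):
    return torch.abs(a + a)

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(torch.compile(f)(x), f(x))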
SimpleCeilModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ga/cgacbgfdnfffemaen3ot3b3225mehdgkmdv6g4yho2qdlicpwwlu.py # Topologically Sorted Source Nodes: [c, ceil], Original ATen: [aten.add, aten.ceil] # Source node to ATen node mapping: # c => add # ceil => ceil # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %ceil : [num_users=1] = call_function[target=torch.ops.aten.ceil.default](args = (%add,), kwargs = {}) triton_poi_fused_add_ceil_0 = async_compile.triton('triton_poi_fused_add_ceil_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_ceil_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_ceil_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) 
tmp2 = tmp0 + tmp1 tmp3 = libdevice.ceil(tmp2) tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [c, ceil], Original ATen: [aten.add, aten.ceil] stream0 = get_raw_stream(0) triton_poi_fused_add_ceil_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.jit import torch.onnx import torch.nn class SimpleCeilModule(torch.nn.Module): def forward(self, a, b): c = a + b return torch.ceil(c) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_ceil_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 + tmp1 tmp3 = libdevice.ceil(tmp2) tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_ceil_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class SimpleCeilModuleNew(torch.nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
opti-mix/glow
SimpleCeilModule
false
7385
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
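The same pointwise pattern extends to two inputs: each program loads one tile from each operand, combines them, and stores one tile. A stripped-down equivalent of the fused add + ceil kernel, as a sketch assuming CUDA (triton.cdiv plays the role of Inductor's grid() helper):

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_helpers import libdevice

@triton.jit
def add_ceil_kernel(a_ptr, b_ptr, out_ptr, n, BLOCK: tl.constexpr):
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < n
    a = tl.load(a_ptr + offs, mask=mask)
    b = tl.load(b_ptr + offs, mask=mask)
    tl.store(out_ptr + offs, libdevice.ceil(a + b), mask=mask)

if torch.cuda.is_available():
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand_like(a)
    out = torch.empty_like(a)
    n = a.numel()
    add_ceil_kernel[(triton.cdiv(n, 128),)](a, b, out, n, BLOCK=128)
    torch.testing.assert_close(out, torch.ceil(a + b))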
OneTupleModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/sd/csdfq3pwxme6skykh2xidrwr6t4ujkpebegmshqc4a6ptefksvl7.py # Topologically Sorted Source Nodes: [y], Original ATen: [aten.mul] # Source node to ATen node mapping: # y => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 2), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), 
(64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [y], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.jit import torch.onnx import torch.nn class OneTupleModule(torch.nn.Module): def __init__(self): super(OneTupleModule, self).__init__() def forward(self, x): y = 2 * x return y, def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class OneTupleModuleNew(torch.nn.Module): def __init__(self): super(OneTupleModuleNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
opti-mix/glow
OneTupleModule
false
7386
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
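One detail worth noting in this record: the eager module returns a one-element tuple, while the rewritten OneTupleModuleNew unwraps it (output[0]) and returns the bare tensor. The eager behaviour, as a CPU-only sketch:

import torch

class OneTupleModule(torch.nn.Module):
    def forward(self, x):
        return 2 * x,  # trailing comma: a one-element tuple

m = OneTupleModule()
x = torch.rand(4, 4, 4, 4)
out = m(x)
assert isinstance(out, tuple) and len(out) == 1
torch.testing.assert_close(out[0], 2 * x)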
SimpleBmmModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/h4/ch4e5ehu5tf4fxe3qcp5wtlsj4zjteppgjnu5d6xg564tkpvpxz6.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg0_1), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0 + tmp0 tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 
4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, bmm], Original ATen: [aten.add, aten.bmm] extern_kernels.bmm(buf0, arg1_1, out=buf1) del arg1_1 del buf0 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.jit import torch.onnx import torch.nn class SimpleBmmModule(torch.nn.Module): def forward(self, a, b): return (a + a).bmm(b) def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 + tmp0 tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(buf0, arg1_1, out=buf1) del arg1_1 del buf0 return buf1, class SimpleBmmModuleNew(torch.nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
opti-mix/glow
SimpleBmmModule
false
7387
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
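Here again only the elementwise a + a is fused into Triton; the batched matmul is dispatched to extern_kernels.bmm. What bmm computes per batch slice, as a CPU-only sketch:

import torch

a, b = torch.rand(4, 4, 4), torch.rand(4, 4, 4)
# bmm multiplies matching batch slices: out[i] = (a + a)[i] @ b[i]
ref = torch.stack([(a + a)[i] @ b[i] for i in range(a.shape[0])])
torch.testing.assert_close((a + a).bmm(b), ref)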
ConvGRUCell
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/yr/cyrsfiqcqep5ianv6lfxy43inavacsrots2hnf6qyokhutlu5ocy.py # Topologically Sorted Source Nodes: [h_prev], Original ATen: [aten.new_zeros] # Source node to ATen node mapping: # h_prev => full_default # Graph fragment: # %full_default : [num_users=4] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) triton_poi_fused_new_zeros_0 = async_compile.triton('triton_poi_fused_new_zeros_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_new_zeros_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + (x0), tmp0, xmask) ''', device_str='cuda') # kernel path: 
runs/run_shard_4/inductor_cache/qv/cqvqoxzqwduraftvkpveor26twbksdy4eroiqe5gkhhcqhmp4fly.py # Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat] # Source node to ATen node mapping: # combined => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %full_default], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 8 x0 = xindex % 16 x2 = (xindex // 128) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = 0.0 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp6, tmp9, tmp10) tmp12 = tl.where(tmp4, tmp5, tmp11) tl.store(out_ptr0 + (x3), tmp12, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/cx/ccxv3djgp4azaupyxj6k27iatx6fdzs4dd65zqaoeei7no4esmab.py # Topologically Sorted Source Nodes: [conv2d, combined_conv], Original ATen: [aten.convolution, aten.sigmoid] # Source node to ATen node mapping: # combined_conv => sigmoid # conv2d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_sigmoid_2 = async_compile.triton('triton_poi_fused_convolution_sigmoid_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from 
torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 8 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + (x3), tmp3, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/nn/cnn7cjpchplaegvrmuuug4x3kbotklqczshw3hbasnx2ijtf4yn3.py # Topologically Sorted Source Nodes: [conv2d_1, conv2d_2, mul, add, h_, sub, mul_1, mul_2, h_cur], Original ATen: [aten.convolution, aten.mul, aten.add, aten.tanh, aten.rsub] # Source node to ATen node mapping: # add => add # conv2d_1 => convolution_1 # conv2d_2 => convolution_2 # h_ => tanh # h_cur => add_1 # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # sub => sub # Graph fragment: # %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%full_default, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, %convolution_2), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %mul), kwargs = {}) # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %getitem), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %tanh), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %full_default), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {}) triton_poi_fused_add_convolution_mul_rsub_tanh_3 = async_compile.triton('triton_poi_fused_add_convolution_mul_rsub_tanh_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from 
torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mul_rsub_tanh_3', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_mul_rsub_tanh_3(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = (xindex // 16) % 4 x2 = (xindex // 64) x3 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + (x4), xmask) tmp4 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (x3 + (128*x2)), xmask) tmp9 = tl.load(in_ptr2 + (64 + x3 + (128*x2)), xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp7 = 1.0 tmp8 = tmp7 - tmp6 tmp10 = tmp9 * tmp5 tmp11 = tmp2 + tmp10 tmp12 = libdevice.tanh(tmp11) tmp13 = tmp8 * tmp12 tmp14 = 0.0 tmp15 = tmp6 * tmp14 tmp16 = tmp13 + tmp15 tl.store(in_out_ptr0 + (x4), tmp2, xmask) tl.store(in_out_ptr1 + (x4), tmp5, xmask) tl.store(out_ptr0 + (x4), tmp8, xmask) tl.store(out_ptr1 + (x4), tmp16, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_3, (8, ), (1, )) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [h_prev], Original ATen: [aten.new_zeros] stream0 = get_raw_stream(0) triton_poi_fused_new_zeros_0.run(buf0, 256, grid=grid(256), stream=stream0) buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [combined], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(primals_1, buf1, 512, grid=grid(512), 
stream=stream0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 8, 4, 4), (128, 16, 4, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [conv2d, combined_conv], Original ATen: [aten.convolution, aten.sigmoid] triton_poi_fused_convolution_sigmoid_2.run(buf3, primals_3, 512, grid=grid(512), stream=stream0) del primals_3 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution] buf6 = extern_kernels.convolution(buf0, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf4; del buf4 # reuse buf7 = buf6; del buf6 # reuse buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d_1, conv2d_2, mul, add, h_, sub, mul_1, mul_2, h_cur], Original ATen: [aten.convolution, aten.mul, aten.add, aten.tanh, aten.rsub] triton_poi_fused_add_convolution_mul_rsub_tanh_3.run(buf5, buf7, primals_5, primals_7, buf3, buf8, buf9, 256, grid=grid(256), stream=stream0) del primals_5 del primals_7 return (buf9, primals_1, primals_2, primals_4, primals_6, buf0, buf1, buf3, reinterpret_tensor(buf3, (4, 4, 4, 4), (128, 16, 4, 1), 64), buf5, buf7, buf8, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((8, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn as nn import torch.nn.functional as F def one_param(m): """First parameter in `m`""" return next(m.parameters()) class ConvGRUCell(nn.Module): def __init__(self, input_dim, hidden_dim, kernel_size=(3, 3), bias=True, activation=F.tanh, batchnorm=False): """ Initialize ConvGRU cell. Parameters ---------- input_dim: int Number of channels of input tensor. hidden_dim: int Number of channels of hidden state. kernel_size: (int, int) Size of the convolutional kernel. bias: bool Whether or not to add the bias. """ super().__init__() self.input_dim = input_dim self.hidden_dim = hidden_dim self.kernel_size = kernel_size if isinstance(kernel_size, (tuple, list) ) else [kernel_size] * 2 self.padding = self.kernel_size[0] // 2, self.kernel_size[1] // 2 self.bias = bias self.activation = activation self.batchnorm = batchnorm self.conv_zr = nn.Conv2d(in_channels=self.input_dim + self. hidden_dim, out_channels=2 * self.hidden_dim, kernel_size=self. kernel_size, padding=self.padding, bias=self.bias) self.conv_h1 = nn.Conv2d(in_channels=self.input_dim, out_channels= self.hidden_dim, kernel_size=self.kernel_size, padding=self. padding, bias=self.bias) self.conv_h2 = nn.Conv2d(in_channels=self.hidden_dim, out_channels= self.hidden_dim, kernel_size=self.kernel_size, padding=self. padding, bias=self.bias) self.reset_parameters() def forward(self, input, h_prev=None): if h_prev is None: h_prev = self.init_hidden(input) combined = torch.cat((input, h_prev), dim=1) combined_conv = F.sigmoid(self.conv_zr(combined)) z, r = torch.split(combined_conv, self.hidden_dim, dim=1) h_ = self.activation(self.conv_h1(input) + r * self.conv_h2(h_prev)) h_cur = (1 - z) * h_ + z * h_prev return h_cur def init_hidden(self, input): bs, _ch, h, w = input.shape return one_param(self).new_zeros(bs, self.hidden_dim, h, w) def reset_parameters(self): nn.init.xavier_uniform_(self.conv_zr.weight, gain=nn.init. calculate_gain('tanh')) self.conv_zr.bias.data.zero_() nn.init.xavier_uniform_(self.conv_h1.weight, gain=nn.init. calculate_gain('tanh')) self.conv_h1.bias.data.zero_() nn.init.xavier_uniform_(self.conv_h2.weight, gain=nn.init. calculate_gain('tanh')) self.conv_h2.bias.data.zero_() if self.batchnorm: self.bn1.reset_parameters() self.bn2.reset_parameters() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'hidden_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = 0.0 tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp6, tmp9, tmp10) tmp12 = tl.where(tmp4, tmp5, tmp11) tl.store(out_ptr0 + x3, tmp12, xmask) @triton.jit def triton_poi_fused_convolution_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x3, tmp3, xmask) @triton.jit def triton_poi_fused_add_convolution_mul_rsub_tanh_3(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x1 = xindex // 16 % 4 x2 = xindex // 64 x3 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_out_ptr1 + x4, xmask) tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (x3 + 128 * x2), xmask) tmp9 = tl.load(in_ptr2 + (64 + x3 + 128 * x2), xmask) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp7 = 1.0 tmp8 = tmp7 - tmp6 tmp10 = tmp9 * tmp5 tmp11 = tmp2 + tmp10 tmp12 = libdevice.tanh(tmp11) tmp13 = tmp8 * tmp12 tmp14 = 0.0 tmp15 = tmp6 * tmp14 tmp16 = tmp13 + tmp15 tl.store(in_out_ptr0 + x4, tmp2, xmask) tl.store(in_out_ptr1 + x4, tmp5, xmask) tl.store(out_ptr0 + x4, tmp8, xmask) tl.store(out_ptr1 + x4, tmp16, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_3, (8,), (1,)) assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), 
torch.float32) get_raw_stream(0) triton_poi_fused_new_zeros_0[grid(256)](buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) triton_poi_fused_cat_1[grid(512)](primals_1, buf1, 512, XBLOCK=256, num_warps=4, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 8, 4, 4), (128, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_sigmoid_2[grid(512)](buf3, primals_3, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf4 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf6 = extern_kernels.convolution(buf0, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf4 del buf4 buf7 = buf6 del buf6 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_convolution_mul_rsub_tanh_3[grid(256)](buf5, buf7, primals_5, primals_7, buf3, buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 del primals_7 return (buf9, primals_1, primals_2, primals_4, primals_6, buf0, buf1, buf3, reinterpret_tensor(buf3, (4, 4, 4, 4), (128, 16, 4, 1), 64), buf5, buf7, buf8) def one_param(m): """First parameter in `m`""" return next(m.parameters()) class ConvGRUCellNew(nn.Module): def __init__(self, input_dim, hidden_dim, kernel_size=(3, 3), bias=True, activation=F.tanh, batchnorm=False): """ Initialize ConvGRU cell. Parameters ---------- input_dim: int Number of channels of input tensor. hidden_dim: int Number of channels of hidden state. kernel_size: (int, int) Size of the convolutional kernel. bias: bool Whether or not to add the bias. """ super().__init__() self.input_dim = input_dim self.hidden_dim = hidden_dim self.kernel_size = kernel_size if isinstance(kernel_size, (tuple, list) ) else [kernel_size] * 2 self.padding = self.kernel_size[0] // 2, self.kernel_size[1] // 2 self.bias = bias self.activation = activation self.batchnorm = batchnorm self.conv_zr = nn.Conv2d(in_channels=self.input_dim + self. hidden_dim, out_channels=2 * self.hidden_dim, kernel_size=self. kernel_size, padding=self.padding, bias=self.bias) self.conv_h1 = nn.Conv2d(in_channels=self.input_dim, out_channels= self.hidden_dim, kernel_size=self.kernel_size, padding=self. padding, bias=self.bias) self.conv_h2 = nn.Conv2d(in_channels=self.hidden_dim, out_channels= self.hidden_dim, kernel_size=self.kernel_size, padding=self. padding, bias=self.bias) self.reset_parameters() def init_hidden(self, input): bs, _ch, h, w = input.shape return one_param(self).new_zeros(bs, self.hidden_dim, h, w) def reset_parameters(self): nn.init.xavier_uniform_(self.conv_zr.weight, gain=nn.init. calculate_gain('tanh')) self.conv_zr.bias.data.zero_() nn.init.xavier_uniform_(self.conv_h1.weight, gain=nn.init. calculate_gain('tanh')) self.conv_h1.bias.data.zero_() nn.init.xavier_uniform_(self.conv_h2.weight, gain=nn.init. 
calculate_gain('tanh')) self.conv_h2.bias.data.zero_() if self.batchnorm: self.bn1.reset_parameters() self.bn2.reset_parameters() def forward(self, input_0): primals_2 = self.conv_zr.weight primals_3 = self.conv_zr.bias primals_4 = self.conv_h1.weight primals_5 = self.conv_h1.bias primals_6 = self.conv_h2.weight primals_7 = self.conv_h2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
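A minimal usage sketch (not part of the original record), assuming a CUDA device and the ConvGRUCell / ConvGRUCellNew definitions above: it copies one set of weights into both cells and checks that the Inductor-compiled path reproduces the eager GRU step, using the shapes from get_inputs()/get_init_inputs().

import torch

torch.manual_seed(0)
eager = ConvGRUCell(input_dim=4, hidden_dim=4).cuda()
compiled = ConvGRUCellNew(input_dim=4, hidden_dim=4).cuda()
compiled.load_state_dict(eager.state_dict())  # same conv_zr / conv_h1 / conv_h2 weights
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    ref = eager(x)       # h_prev defaults to zeros via init_hidden
    out = compiled(x)    # runs the fused Triton kernels in call()
print(torch.allclose(ref, out, atol=1e-5))  # expected: True

Note that the fused kernel folds z * h_prev to a constant zero (tmp15 = tmp6 * 0.0) because the trace specialized on the all-zeros initial hidden state, so the compiled cell is only valid for the h_prev = 0 step.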
openclimatefix/MetNet
ConvGRUCell
false
7388
[ "MIT" ]
1
06eed550e93da6325641958b0d36c15adde1d928
https://github.com/openclimatefix/MetNet/tree/06eed550e93da6325641958b0d36c15adde1d928
SimpleAndModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/vq/cvqibjhky7ckcngwo5vx6iwslb3qukduvryszxa4md2bb7f76fpd.py # Topologically Sorted Source Nodes: [c, logical_and_1], Original ATen: [aten.logical_and] # Source node to ATen node mapping: # c => logical_and # logical_and_1 => logical_and_1 # Graph fragment: # %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%arg1_1, %arg0_1), kwargs = {}) # %logical_and_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%logical_and, %logical_and), kwargs = {}) triton_poi_fused_logical_and_0 = async_compile.triton('triton_poi_fused_logical_and_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_logical_and_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_logical_and_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < 
xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp1 = (tmp0 != 0) tmp3 = (tmp2 != 0) tmp4 = tmp1 & tmp3 tmp5 = tmp4 & tmp4 tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [c, logical_and_1], Original ATen: [aten.logical_and] stream0 = get_raw_stream(0) triton_poi_fused_logical_and_0.run(arg1_1, arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.jit import torch.onnx import torch.nn class SimpleAndModule(torch.nn.Module): def __init__(self): super(SimpleAndModule, self).__init__() def forward(self, a, b): c = torch.logical_and(a, b) return torch.logical_and(c, c) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_logical_and_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp1 = tmp0 != 0 tmp3 = tmp2 != 0 tmp4 = tmp1 & tmp3 tmp5 = tmp4 & tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_logical_and_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class SimpleAndModuleNew(torch.nn.Module): def __init__(self): super(SimpleAndModuleNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
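A minimal usage sketch (not part of the original record), assuming a CUDA device: logical_and treats any nonzero float as True, so inputs drawn from [0, 1) are almost surely all-True, and the output is a bool tensor.

import torch

m = SimpleAndModuleNew()
a = torch.rand(4, 4, 4, 4, device='cuda')
b = torch.rand(4, 4, 4, 4, device='cuda')
out = m(a, b)  # single fused kernel: (a != 0) & (b != 0), and'ed with itself
print(out.dtype, out.shape)  # torch.bool torch.Size([4, 4, 4, 4])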
opti-mix/glow
SimpleAndModule
false
7389
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
SimpleClampMinModel
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/gx/cgxeuyalbe255nl7kl2wupslogkj7sqz4dk77bpxwhn37372vatf.py # Topologically Sorted Source Nodes: [clamp_min], Original ATen: [aten.clamp_min] # Source node to ATen node mapping: # clamp_min => clamp_min # Graph fragment: # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 4), kwargs = {}) triton_poi_fused_clamp_min_0 = async_compile.triton('triton_poi_fused_clamp_min_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_min_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_min_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 4.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del 
async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [clamp_min], Original ATen: [aten.clamp_min] stream0 = get_raw_stream(0) triton_poi_fused_clamp_min_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.jit import torch.onnx import torch.nn class SimpleClampMinModel(torch.nn.Module): def __init__(self, min): super(SimpleClampMinModel, self).__init__() self.min = min def forward(self, input): return torch.clamp_min(input, self.min) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'min': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_min_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 4.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_min_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 return buf0, class SimpleClampMinModelNew(torch.nn.Module): def __init__(self, min): super(SimpleClampMinModelNew, self).__init__() self.min = min def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
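A minimal usage sketch (not part of the original record), assuming a CUDA device. Note that call() bakes the clamp bound in as the constant 4.0, so the compiled module is specialized to min=4; with inputs drawn from [0, 1), every element is clamped up to that bound.

import torch

m = SimpleClampMinModelNew(min=4)
x = torch.rand(4, 4, 4, 4, device='cuda')
out = m(x)
print(bool((out == 4.0).all()))  # expected: True, since every input is < 4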
opti-mix/glow
SimpleClampMinModel
false
7390
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
EnsembleModel
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/yl/cylmztthhqsuazxqdnn3s5mwrv4ht4vltlbbcl2pu45p6l64wrd3.py # Topologically Sorted Source Nodes: [sub, inputs], Original ATen: [aten.sub, aten.div] # Source node to ATen node mapping: # inputs => div # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_2, %primals_1), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %primals_3), kwargs = {}) triton_poi_fused_div_sub_0 = async_compile.triton('triton_poi_fused_div_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + 
(x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 / tmp3 tl.store(out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/su/csuizz2ownuafectvusuawmeivm2ftgfnsijcxph6sw2bq3rztvj.py # Topologically Sorted Source Nodes: [inputs_1, inputs_2], Original ATen: [aten.add, aten.silu] # Source node to ATen node mapping: # inputs_1 => add # inputs_2 => mul, sigmoid # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%bmm, %primals_5), kwargs = {}) # %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %sigmoid), kwargs = {}) triton_poi_fused_add_silu_1 = async_compile.triton('triton_poi_fused_add_silu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_silu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_silu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 3200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 200 x2 = (xindex // 800) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (200*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/zb/czbdw5y33wdeoyorbmsurpyiyxyzndcnzhlq75se72y2xog2pq6h.py # Topologically Sorted Source Nodes: [inputs_13], Original ATen: [aten.add] # Source node to ATen node mapping: # inputs_13 => add_6 # Graph fragment: # %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%bmm_6, %primals_17), kwargs = {}) triton_poi_fused_add_2 = async_compile.triton('triton_poi_fused_add_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import 
libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 8 x2 = (xindex // 32) tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (8*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/gv/cgvoihs2me7l4uwmhcvmzqpxv3oby3zgl3mihluiy7ns4ykscrtg.py # Topologically Sorted Source Nodes: [sub_1, softplus, logvar_1, sub_3, softplus_1, logvar_2], Original ATen: [aten.sub, aten.softplus, aten.add] # Source node to ATen node mapping: # logvar_1 => sub_2 # logvar_2 => add_7 # softplus => exp, gt, log1p, where # softplus_1 => exp_1, gt_1, log1p_1, where_1 # sub_1 => sub_1 # sub_3 => sub_3 # Graph fragment: # %sub_1 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_18, %slice_6), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sub_1, 20), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %sub_1, %log1p), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_18, %where), kwargs = {}) # %sub_3 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_2, %primals_19), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {}) # %log1p_1 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_1,), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%sub_3, 20), kwargs = {}) # %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %sub_3, %log1p_1), kwargs = {}) # %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_19, %where_1), kwargs = {}) triton_poi_fused_add_softplus_sub_3 = async_compile.triton('triton_poi_fused_add_softplus_sub_3', 
''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_softplus_sub_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_softplus_sub_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4 + x0 + (8*x1)), xmask) tmp3 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 20.0 tmp5 = tmp2 > tmp4 tmp6 = tl_math.exp(tmp2) tmp7 = libdevice.log1p(tmp6) tmp8 = tl.where(tmp5, tmp2, tmp7) tmp9 = tmp0 - tmp8 tmp10 = tmp9 - tmp3 tmp11 = tmp10 > tmp4 tmp12 = tl_math.exp(tmp10) tmp13 = libdevice.log1p(tmp12) tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = tmp3 + tmp14 tl.store(out_ptr0 + (x2), tmp2, xmask) tl.store(out_ptr1 + (x2), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19 = args args.clear() assert_size_stride(primals_1, (4, ), (1, )) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 200), (800, 200, 1)) assert_size_stride(primals_5, (4, 1, 200), (200, 200, 1)) assert_size_stride(primals_6, (4, 200, 200), (40000, 200, 1)) assert_size_stride(primals_7, (4, 1, 200), (200, 200, 1)) assert_size_stride(primals_8, (4, 200, 200), (40000, 200, 1)) assert_size_stride(primals_9, (4, 1, 200), (200, 200, 1)) assert_size_stride(primals_10, (4, 200, 200), (40000, 200, 1)) assert_size_stride(primals_11, (4, 1, 200), (200, 200, 1)) assert_size_stride(primals_12, (4, 200, 200), (40000, 200, 1)) assert_size_stride(primals_13, (4, 1, 200), (200, 200, 1)) assert_size_stride(primals_14, (4, 200, 200), (40000, 200, 1)) assert_size_stride(primals_15, (4, 1, 200), (200, 200, 1)) assert_size_stride(primals_16, (4, 200, 8), (1600, 8, 1)) 
assert_size_stride(primals_17, (4, 1, 8), (8, 8, 1)) assert_size_stride(primals_18, (1, 4), (4, 1)) assert_size_stride(primals_19, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, inputs], Original ATen: [aten.sub, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_sub_0.run(primals_2, primals_1, primals_3, buf0, 16, grid=grid(16), stream=stream0) del primals_1 del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 200), (800, 200, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (0, 4, 1), 0), primals_4, out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4, 200), (800, 200, 1), torch.float32) # Topologically Sorted Source Nodes: [inputs_1, inputs_2], Original ATen: [aten.add, aten.silu] triton_poi_fused_add_silu_1.run(buf1, primals_5, buf2, 3200, grid=grid(3200), stream=stream0) buf3 = empty_strided_cuda((4, 4, 200), (800, 200, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm] extern_kernels.bmm(buf2, primals_6, out=buf3) buf4 = empty_strided_cuda((4, 4, 200), (800, 200, 1), torch.float32) # Topologically Sorted Source Nodes: [inputs_3, inputs_4], Original ATen: [aten.add, aten.silu] triton_poi_fused_add_silu_1.run(buf3, primals_7, buf4, 3200, grid=grid(3200), stream=stream0) buf5 = empty_strided_cuda((4, 4, 200), (800, 200, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm] extern_kernels.bmm(buf4, primals_8, out=buf5) buf6 = empty_strided_cuda((4, 4, 200), (800, 200, 1), torch.float32) # Topologically Sorted Source Nodes: [inputs_5, inputs_6], Original ATen: [aten.add, aten.silu] triton_poi_fused_add_silu_1.run(buf5, primals_9, buf6, 3200, grid=grid(3200), stream=stream0) buf7 = empty_strided_cuda((4, 4, 200), (800, 200, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm] extern_kernels.bmm(buf6, primals_10, out=buf7) buf8 = empty_strided_cuda((4, 4, 200), (800, 200, 1), torch.float32) # Topologically Sorted Source Nodes: [inputs_7, inputs_8], Original ATen: [aten.add, aten.silu] triton_poi_fused_add_silu_1.run(buf7, primals_11, buf8, 3200, grid=grid(3200), stream=stream0) buf9 = empty_strided_cuda((4, 4, 200), (800, 200, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.bmm] extern_kernels.bmm(buf8, primals_12, out=buf9) buf10 = empty_strided_cuda((4, 4, 200), (800, 200, 1), torch.float32) # Topologically Sorted Source Nodes: [inputs_9, inputs_10], Original ATen: [aten.add, aten.silu] triton_poi_fused_add_silu_1.run(buf9, primals_13, buf10, 3200, grid=grid(3200), stream=stream0) buf11 = empty_strided_cuda((4, 4, 200), (800, 200, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm] extern_kernels.bmm(buf10, primals_14, out=buf11) buf12 = empty_strided_cuda((4, 4, 200), (800, 200, 1), torch.float32) # Topologically Sorted Source Nodes: [inputs_11, inputs_12], Original ATen: [aten.add, aten.silu] triton_poi_fused_add_silu_1.run(buf11, primals_15, buf12, 3200, grid=grid(3200), stream=stream0) buf13 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul_6], Original ATen: [aten.bmm] extern_kernels.bmm(buf12, primals_16, out=buf13) buf14 = buf13; del buf13 # reuse # Topologically Sorted Source Nodes: 
[inputs_13], Original ATen: [aten.add] triton_poi_fused_add_2.run(buf14, primals_17, 128, grid=grid(128), stream=stream0) del primals_17 buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub_1, softplus, logvar_1, sub_3, softplus_1, logvar_2], Original ATen: [aten.sub, aten.softplus, aten.add] triton_poi_fused_add_softplus_sub_3.run(primals_18, buf14, primals_19, buf15, buf16, 64, grid=grid(64), stream=stream0) return (reinterpret_tensor(buf14, (4, 4, 4), (32, 8, 1), 0), buf16, primals_5, primals_7, primals_9, primals_11, primals_13, primals_15, primals_18, primals_19, buf0, buf1, buf3, buf5, buf7, buf9, buf11, buf15, reinterpret_tensor(buf12, (4, 200, 4), (800, 1, 200), 0), reinterpret_tensor(primals_16, (4, 8, 200), (1600, 1, 8), 0), reinterpret_tensor(buf10, (4, 200, 4), (800, 1, 200), 0), reinterpret_tensor(primals_14, (4, 200, 200), (40000, 1, 200), 0), reinterpret_tensor(buf8, (4, 200, 4), (800, 1, 200), 0), reinterpret_tensor(primals_12, (4, 200, 200), (40000, 1, 200), 0), reinterpret_tensor(buf6, (4, 200, 4), (800, 1, 200), 0), reinterpret_tensor(primals_10, (4, 200, 200), (40000, 1, 200), 0), reinterpret_tensor(buf4, (4, 200, 4), (800, 1, 200), 0), reinterpret_tensor(primals_8, (4, 200, 200), (40000, 1, 200), 0), reinterpret_tensor(buf2, (4, 200, 4), (800, 1, 200), 0), reinterpret_tensor(primals_6, (4, 200, 200), (40000, 1, 200), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 200), (800, 200, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 1, 200), (200, 200, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 200, 200), (40000, 200, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 1, 200), (200, 200, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 200, 200), (40000, 200, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, 1, 200), (200, 200, 1), device='cuda:0', dtype=torch.float32) primals_10 = rand_strided((4, 200, 200), (40000, 200, 1), device='cuda:0', dtype=torch.float32) primals_11 = rand_strided((4, 1, 200), (200, 200, 1), device='cuda:0', dtype=torch.float32) primals_12 = rand_strided((4, 200, 200), (40000, 200, 1), device='cuda:0', dtype=torch.float32) primals_13 = rand_strided((4, 1, 200), (200, 200, 1), device='cuda:0', dtype=torch.float32) primals_14 = rand_strided((4, 200, 200), (40000, 200, 1), device='cuda:0', dtype=torch.float32) primals_15 = rand_strided((4, 1, 200), (200, 200, 1), device='cuda:0', dtype=torch.float32) primals_16 = rand_strided((4, 200, 8), (1600, 8, 1), device='cuda:0', dtype=torch.float32) primals_17 = rand_strided((4, 1, 8), (8, 8, 1), device='cuda:0', dtype=torch.float32) primals_18 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_19 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, 
primals_19]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import numpy as np import torch.nn.functional as F def truncated_standardized_normal(shape, a=-2.0, b=2.0): a = torch.Tensor([a]) b = torch.Tensor([b]) U = torch.distributions.uniform.Uniform(0, 1) u = U.sample(shape) Fa = 0.5 * (1 + torch.erf(a / math.sqrt(2))) Fb = 0.5 * (1 + torch.erf(b / math.sqrt(2))) return math.sqrt(2) * torch.erfinv(2 * ((Fb - Fa) * u + Fa) - 1) def get_affine_params(ensemble_size, in_features, out_features): w = truncated_standardized_normal(shape=(ensemble_size, in_features, out_features)) / (2.0 * math.sqrt(in_features)) w = torch.nn.Parameter(w) b = torch.nn.Parameter(torch.zeros(ensemble_size, 1, out_features, dtype=torch.float32)) return w, b class EnsembleModel(torch.nn.Module): def __init__(self, ensemble_size, input_num, output_num, hidden_num=200): super().__init__() self.num_nets = ensemble_size self.input_num = input_num self.output_num = output_num self.lin0_w, self.lin0_b = get_affine_params(ensemble_size, input_num, hidden_num) self.lin1_w, self.lin1_b = get_affine_params(ensemble_size, hidden_num, hidden_num) self.lin2_w, self.lin2_b = get_affine_params(ensemble_size, hidden_num, hidden_num) self.lin3_w, self.lin3_b = get_affine_params(ensemble_size, hidden_num, hidden_num) self.lin4_w, self.lin4_b = get_affine_params(ensemble_size, hidden_num, hidden_num) self.lin5_w, self.lin5_b = get_affine_params(ensemble_size, hidden_num, hidden_num) self.lin6_w, self.lin6_b = get_affine_params(ensemble_size, hidden_num, 2 * output_num) self.inputs_mu = torch.nn.Parameter(torch.zeros(input_num), requires_grad=False) self.inputs_sigma = torch.nn.Parameter(torch.zeros(input_num), requires_grad=False) self.max_logvar = torch.nn.Parameter(torch.ones(1, output_num, dtype=torch.float32) / 2.0) self.min_logvar = torch.nn.Parameter(-torch.ones(1, output_num, dtype=torch.float32) * 10.0) def compute_decays(self): loss = 0.0 loss += 1.0 * (self.lin0_w ** 2).sum() loss += 1.0 * (self.lin1_w ** 2).sum() loss += 1.0 * (self.lin2_w ** 2).sum() loss += 1.0 * (self.lin3_w ** 2).sum() loss += 1.0 * (self.lin4_w ** 2).sum() loss += 1.0 * (self.lin5_w ** 2).sum() loss += 1.0 * (self.lin6_w ** 2).sum() return 1e-05 * loss / 2.0 def fit_input_stats(self, data): mu = np.mean(data, axis=0, keepdims=True) sigma = np.std(data, axis=0, keepdims=True) sigma[sigma < 1e-12] = 1.0 self.inputs_mu.data = torch.from_numpy(mu).float() self.inputs_sigma.data = torch.from_numpy(sigma).float() def forward(self, inputs): inputs = (inputs - self.inputs_mu) / self.inputs_sigma inputs = inputs.matmul(self.lin0_w) + self.lin0_b inputs = F.silu(inputs) inputs = inputs.matmul(self.lin1_w) + self.lin1_b inputs = F.silu(inputs) inputs = inputs.matmul(self.lin2_w) + self.lin2_b inputs = F.silu(inputs) inputs = inputs.matmul(self.lin3_w) + self.lin3_b inputs = F.silu(inputs) inputs = inputs.matmul(self.lin4_w) + self.lin4_b inputs = F.silu(inputs) inputs = inputs.matmul(self.lin5_w) + self.lin5_b inputs = F.silu(inputs) inputs = inputs.matmul(self.lin6_w) + self.lin6_b mean = inputs[:, :, :self.output_num] logvar = inputs[:, :, self.output_num:] logvar = self.max_logvar - F.softplus(self.max_logvar - logvar) logvar = self.min_logvar + F.softplus(logvar - self.min_logvar) return mean, logvar def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'ensemble_size': 4, 'input_num': 4, 'output_num': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import numpy as np assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 / tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_add_silu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 3200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 200 x2 = xindex // 800 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 200 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 8 x2 = xindex // 32 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 8 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_add_softplus_sub_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4 + x0 + 8 * x1), xmask) tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = 20.0 tmp5 = tmp2 > tmp4 tmp6 = tl_math.exp(tmp2) tmp7 = libdevice.log1p(tmp6) tmp8 = tl.where(tmp5, tmp2, tmp7) tmp9 = tmp0 - tmp8 tmp10 = tmp9 - tmp3 tmp11 = tmp10 > tmp4 tmp12 = tl_math.exp(tmp10) tmp13 = libdevice.log1p(tmp12) tmp14 = tl.where(tmp11, tmp10, tmp13) tmp15 = tmp3 + tmp14 tl.store(out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr1 + x2, tmp15, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19) = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 200), (800, 200, 1)) assert_size_stride(primals_5, (4, 1, 200), (200, 200, 1)) assert_size_stride(primals_6, (4, 200, 200), (40000, 200, 1)) assert_size_stride(primals_7, (4, 1, 200), (200, 200, 1)) assert_size_stride(primals_8, (4, 200, 200), (40000, 200, 1)) assert_size_stride(primals_9, (4, 1, 200), (200, 
200, 1)) assert_size_stride(primals_10, (4, 200, 200), (40000, 200, 1)) assert_size_stride(primals_11, (4, 1, 200), (200, 200, 1)) assert_size_stride(primals_12, (4, 200, 200), (40000, 200, 1)) assert_size_stride(primals_13, (4, 1, 200), (200, 200, 1)) assert_size_stride(primals_14, (4, 200, 200), (40000, 200, 1)) assert_size_stride(primals_15, (4, 1, 200), (200, 200, 1)) assert_size_stride(primals_16, (4, 200, 8), (1600, 8, 1)) assert_size_stride(primals_17, (4, 1, 8), (8, 8, 1)) assert_size_stride(primals_18, (1, 4), (4, 1)) assert_size_stride(primals_19, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_sub_0[grid(16)](primals_2, primals_1, primals_3, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4, 200), (800, 200, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (0, 4, 1), 0 ), primals_4, out=buf1) del primals_4 buf2 = empty_strided_cuda((4, 4, 200), (800, 200, 1), torch.float32) triton_poi_fused_add_silu_1[grid(3200)](buf1, primals_5, buf2, 3200, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 4, 200), (800, 200, 1), torch.float32) extern_kernels.bmm(buf2, primals_6, out=buf3) buf4 = empty_strided_cuda((4, 4, 200), (800, 200, 1), torch.float32) triton_poi_fused_add_silu_1[grid(3200)](buf3, primals_7, buf4, 3200, XBLOCK=128, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((4, 4, 200), (800, 200, 1), torch.float32) extern_kernels.bmm(buf4, primals_8, out=buf5) buf6 = empty_strided_cuda((4, 4, 200), (800, 200, 1), torch.float32) triton_poi_fused_add_silu_1[grid(3200)](buf5, primals_9, buf6, 3200, XBLOCK=128, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 4, 200), (800, 200, 1), torch.float32) extern_kernels.bmm(buf6, primals_10, out=buf7) buf8 = empty_strided_cuda((4, 4, 200), (800, 200, 1), torch.float32) triton_poi_fused_add_silu_1[grid(3200)](buf7, primals_11, buf8, 3200, XBLOCK=128, num_warps=4, num_stages=1) buf9 = empty_strided_cuda((4, 4, 200), (800, 200, 1), torch.float32) extern_kernels.bmm(buf8, primals_12, out=buf9) buf10 = empty_strided_cuda((4, 4, 200), (800, 200, 1), torch.float32) triton_poi_fused_add_silu_1[grid(3200)](buf9, primals_13, buf10, 3200, XBLOCK=128, num_warps=4, num_stages=1) buf11 = empty_strided_cuda((4, 4, 200), (800, 200, 1), torch.float32) extern_kernels.bmm(buf10, primals_14, out=buf11) buf12 = empty_strided_cuda((4, 4, 200), (800, 200, 1), torch.float32) triton_poi_fused_add_silu_1[grid(3200)](buf11, primals_15, buf12, 3200, XBLOCK=128, num_warps=4, num_stages=1) buf13 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) extern_kernels.bmm(buf12, primals_16, out=buf13) buf14 = buf13 del buf13 triton_poi_fused_add_2[grid(128)](buf14, primals_17, 128, XBLOCK= 128, num_warps=4, num_stages=1) del primals_17 buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_softplus_sub_3[grid(64)](primals_18, buf14, primals_19, buf15, buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) return (reinterpret_tensor(buf14, (4, 4, 4), (32, 8, 1), 0), buf16, primals_5, primals_7, primals_9, primals_11, primals_13, primals_15, primals_18, primals_19, buf0, buf1, buf3, buf5, buf7, buf9, buf11, buf15, reinterpret_tensor(buf12, (4, 200, 4), (800, 1, 200), 0), reinterpret_tensor(primals_16, (4, 8, 200), (1600, 1, 8), 0), 
reinterpret_tensor(buf10, (4, 200, 4), (800, 1, 200), 0), reinterpret_tensor(primals_14, (4, 200, 200), (40000, 1, 200), 0), reinterpret_tensor(buf8, (4, 200, 4), (800, 1, 200), 0), reinterpret_tensor(primals_12, (4, 200, 200), (40000, 1, 200), 0), reinterpret_tensor(buf6, (4, 200, 4), (800, 1, 200), 0), reinterpret_tensor(primals_10, (4, 200, 200), (40000, 1, 200), 0), reinterpret_tensor(buf4, (4, 200, 4), (800, 1, 200), 0), reinterpret_tensor(primals_8, (4, 200, 200), (40000, 1, 200), 0), reinterpret_tensor(buf2, (4, 200, 4), (800, 1, 200), 0), reinterpret_tensor(primals_6, (4, 200, 200), (40000, 1, 200), 0)) def truncated_standardized_normal(shape, a=-2.0, b=2.0): a = torch.Tensor([a]) b = torch.Tensor([b]) U = torch.distributions.uniform.Uniform(0, 1) u = U.sample(shape) Fa = 0.5 * (1 + torch.erf(a / math.sqrt(2))) Fb = 0.5 * (1 + torch.erf(b / math.sqrt(2))) return math.sqrt(2) * torch.erfinv(2 * ((Fb - Fa) * u + Fa) - 1) def get_affine_params(ensemble_size, in_features, out_features): w = truncated_standardized_normal(shape=(ensemble_size, in_features, out_features)) / (2.0 * math.sqrt(in_features)) w = torch.nn.Parameter(w) b = torch.nn.Parameter(torch.zeros(ensemble_size, 1, out_features, dtype=torch.float32)) return w, b class EnsembleModelNew(torch.nn.Module): def __init__(self, ensemble_size, input_num, output_num, hidden_num=200): super().__init__() self.num_nets = ensemble_size self.input_num = input_num self.output_num = output_num self.lin0_w, self.lin0_b = get_affine_params(ensemble_size, input_num, hidden_num) self.lin1_w, self.lin1_b = get_affine_params(ensemble_size, hidden_num, hidden_num) self.lin2_w, self.lin2_b = get_affine_params(ensemble_size, hidden_num, hidden_num) self.lin3_w, self.lin3_b = get_affine_params(ensemble_size, hidden_num, hidden_num) self.lin4_w, self.lin4_b = get_affine_params(ensemble_size, hidden_num, hidden_num) self.lin5_w, self.lin5_b = get_affine_params(ensemble_size, hidden_num, hidden_num) self.lin6_w, self.lin6_b = get_affine_params(ensemble_size, hidden_num, 2 * output_num) self.inputs_mu = torch.nn.Parameter(torch.zeros(input_num), requires_grad=False) self.inputs_sigma = torch.nn.Parameter(torch.zeros(input_num), requires_grad=False) self.max_logvar = torch.nn.Parameter(torch.ones(1, output_num, dtype=torch.float32) / 2.0) self.min_logvar = torch.nn.Parameter(-torch.ones(1, output_num, dtype=torch.float32) * 10.0) def compute_decays(self): loss = 0.0 loss += 1.0 * (self.lin0_w ** 2).sum() loss += 1.0 * (self.lin1_w ** 2).sum() loss += 1.0 * (self.lin2_w ** 2).sum() loss += 1.0 * (self.lin3_w ** 2).sum() loss += 1.0 * (self.lin4_w ** 2).sum() loss += 1.0 * (self.lin5_w ** 2).sum() loss += 1.0 * (self.lin6_w ** 2).sum() return 1e-05 * loss / 2.0 def fit_input_stats(self, data): mu = np.mean(data, axis=0, keepdims=True) sigma = np.std(data, axis=0, keepdims=True) sigma[sigma < 1e-12] = 1.0 self.inputs_mu.data = torch.from_numpy(mu).float() self.inputs_sigma.data = torch.from_numpy(sigma).float() def forward(self, input_0): primals_4 = self.lin0_w primals_5 = self.lin0_b primals_6 = self.lin1_w primals_7 = self.lin1_b primals_8 = self.lin2_w primals_9 = self.lin2_b primals_10 = self.lin3_w primals_11 = self.lin3_b primals_12 = self.lin4_w primals_13 = self.lin4_b primals_14 = self.lin5_w primals_15 = self.lin5_b primals_16 = self.lin6_w primals_17 = self.lin6_b primals_1 = self.inputs_mu primals_3 = self.inputs_sigma primals_18 = self.max_logvar primals_19 = self.min_logvar primals_2 = input_0 output = call([primals_1, primals_2, primals_3, 
primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19]) return output[0], output[1]
numahha/wmopo
EnsembleModel
false
7,391
[ "MIT" ]
1
1557dab2e8168c1f2e53ffbc435b4000680f1d28
https://github.com/numahha/wmopo/tree/1557dab2e8168c1f2e53ffbc435b4000680f1d28
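A quick sanity check on the inverse-CDF truncation used by `truncated_standardized_normal` above (a sketch with the function's default bounds a=-2, b=2; plain CPU eager code, independent of the generated kernels):

import math
import torch

# The map sqrt(2)*erfinv(2*((Fb - Fa)*u + Fa) - 1) sends u ~ U(0, 1)
# to a standard normal truncated to [a, b], so samples must stay in-range.
a, b = -2.0, 2.0
u = torch.rand(10000)
Fa = 0.5 * (1 + math.erf(a / math.sqrt(2)))
Fb = 0.5 * (1 + math.erf(b / math.sqrt(2)))
x = math.sqrt(2) * torch.erfinv(2 * ((Fb - Fa) * u + Fa) - 1)
assert x.min() >= a - 1e-4 and x.max() <= b + 1e-4  # small float32 tolerance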
SimpleClampModel
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ym/cymkueyytwmyd6cqzabpgskqntypbctwgeke5274sffl2aogwlds.py # Topologically Sorted Source Nodes: [clamp], Original ATen: [aten.clamp] # Source node to ATen node mapping: # clamp => clamp_max, clamp_min # Graph fragment: # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 4), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 4), kwargs = {}) triton_poi_fused_clamp_0 = async_compile.triton('triton_poi_fused_clamp_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 4.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) 
tmp3 = triton_helpers.minimum(tmp2, tmp1) tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [clamp], Original ATen: [aten.clamp] stream0 = get_raw_stream(0) triton_poi_fused_clamp_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.jit import torch.onnx import torch.nn class SimpleClampModel(torch.nn.Module): def __init__(self, min, max): super(SimpleClampModel, self).__init__() self.min = min self.max = max def forward(self, input): return torch.clamp(input, self.min, self.max) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'min': 4, 'max': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 4.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = triton_helpers.minimum(tmp2, tmp1) tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class SimpleClampModelNew(torch.nn.Module): def __init__(self, min, max): super(SimpleClampModelNew, self).__init__() self.min = min self.max = max def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
opti-mix/glow
SimpleClampModel
false
7,392
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
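Worth noting about the entry above: `get_init_inputs` sets `min=4` and `max=4`, so the fused maximum/minimum pair collapses to the constant 4.0 for every input. A minimal eager-mode check (a sketch on CPU, independent of the compiled kernel):

import torch

# clamp with min == max maps every element to that single value.
x = torch.rand(4, 4, 4, 4)
y = torch.clamp(x, min=4, max=4)
assert torch.all(y == 4.0)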
SimpleExpModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/7g/c7gyxvpvdujjipzswxbu5tactwghjfb7nrevdzajpm4fzclaifhf.py # Topologically Sorted Source Nodes: [other, exp_1], Original ATen: [aten.exp] # Source node to ATen node mapping: # exp_1 => exp_1 # other => exp # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {}) # %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%exp,), kwargs = {}) triton_poi_fused_exp_0 = async_compile.triton('triton_poi_fused_exp_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl_math.exp(tmp0) tmp2 = tl_math.exp(tmp1) tl.store(out_ptr0 + (x0), tmp2, xmask) 
''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [other, exp_1], Original ATen: [aten.exp] stream0 = get_raw_stream(0) triton_poi_fused_exp_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.jit import torch.onnx import torch.nn class SimpleExpModule(torch.nn.Module): def forward(self, input): other = torch.exp(input) return torch.exp(other) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl_math.exp(tmp0) tmp2 = tl_math.exp(tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_exp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class SimpleExpModuleNew(torch.nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
opti-mix/glow
SimpleExpModule
false
7,393
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
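One caveat on the double exponential above: in float32, `exp(exp(x))` overflows to `inf` once the inner `exp(x)` exceeds roughly 88.7, i.e. for `x` above about ln(88.7) ≈ 4.49. A small demonstration (a sketch):

import torch

x = torch.tensor([0.0, 4.0, 5.0])
y = torch.exp(torch.exp(x))
# exp(5) ~ 148.4, and exp(148.4) is far past the float32 maximum (~3.4e38).
assert torch.isfinite(y[:2]).all() and torch.isinf(y[2])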
SimpleGeluModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/cz/cczz2aaz3xffbl7w5gnijw32l5h4gkl36ofwyvf646samygcfe5i.py # Topologically Sorted Source Nodes: [add, gelu], Original ATen: [aten.add, aten.gelu] # Source node to ATen node mapping: # add => add # gelu => add_1, erf, mul, mul_1, mul_2 # Graph fragment: # %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg0_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.5), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.7071067811865476), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add_1), kwargs = {}) triton_poi_fused_add_gelu_0 = async_compile.triton('triton_poi_fused_add_gelu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_gelu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 
'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0 + tmp0 tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = 0.7071067811865476 tmp5 = tmp1 * tmp4 tmp6 = libdevice.erf(tmp5) tmp7 = 1.0 tmp8 = tmp6 + tmp7 tmp9 = tmp3 * tmp8 tl.store(out_ptr0 + (x0), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, gelu], Original ATen: [aten.add, aten.gelu] stream0 = get_raw_stream(0) triton_poi_fused_add_gelu_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.jit import torch.onnx import torch.nn class SimpleGeluModule(torch.nn.Module): def forward(self, tensor): return F.gelu(tensor + tensor) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 + tmp0 tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = 0.7071067811865476 tmp5 = tmp1 * tmp4 tmp6 = libdevice.erf(tmp5) tmp7 = 1.0 tmp8 = tmp6 + tmp7 tmp9 = tmp3 * tmp8 tl.store(out_ptr0 + x0, tmp9, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_gelu_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del arg0_1 return buf0, class SimpleGeluModuleNew(torch.nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
opti-mix/glow
SimpleGeluModule
false
7,394
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
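The constants in the fused kernel above (0.5 and 0.7071067811865476 = 1/sqrt(2)) are exactly the erf-based GELU, gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))). A quick check that the hand-expanded form matches `F.gelu` (a sketch):

import torch
import torch.nn.functional as F

t = torch.rand(4, 4, 4, 4)
x = t + t
manual = 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))
assert torch.allclose(manual, F.gelu(x), atol=1e-6)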
SimpleCosModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/vy/cvykguexawt5en6rd6un27jr5ssgrr5nfe4fek2lsqgpfda2qjl2.py # Topologically Sorted Source Nodes: [add, cos], Original ATen: [aten.add, aten.cos] # Source node to ATen node mapping: # add => add # cos => cos # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg0_1), kwargs = {}) # %cos : [num_users=1] = call_function[target=torch.ops.aten.cos.default](args = (%add,), kwargs = {}) triton_poi_fused_add_cos_0 = async_compile.triton('triton_poi_fused_add_cos_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cos_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_cos_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0 + tmp0 tmp2 = tl_math.cos(tmp1) tl.store(out_ptr0 + 
(x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, cos], Original ATen: [aten.add, aten.cos] stream0 = get_raw_stream(0) triton_poi_fused_add_cos_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.jit import torch.onnx import torch.nn class SimpleCosModule(torch.nn.Module): def __init__(self): super(SimpleCosModule, self).__init__() def forward(self, a): return torch.cos(a + a) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_cos_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 + tmp0 tmp2 = tl_math.cos(tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_cos_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class SimpleCosModuleNew(torch.nn.Module): def __init__(self): super(SimpleCosModuleNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
opti-mix/glow
SimpleCosModule
false
7,395
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
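Since the module computes cos(a + a) = cos(2a), the double-angle identity 1 - 2*sin(a)^2 gives an independent reference for the fused kernel's math (a sketch):

import torch

a = torch.rand(4, 4, 4, 4)
assert torch.allclose(torch.cos(a + a), 1.0 - 2.0 * torch.sin(a) ** 2, atol=1e-6)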
SimpleMaxModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/6l/c6lid7didg6rn4ppoifz2te7odbgr6ljaae2lzzsff3c2lylgtyg.py # Topologically Sorted Source Nodes: [add, add_1, max_1], Original ATen: [aten.add, aten.maximum] # Source node to ATen node mapping: # add => add # add_1 => add_1 # max_1 => maximum # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg0_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %arg1_1), kwargs = {}) # %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%add, %add_1), kwargs = {}) triton_poi_fused_add_maximum_0 = async_compile.triton('triton_poi_fused_add_maximum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_maximum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_maximum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp1 = tmp0 + tmp0 tmp3 = tmp2 + tmp2 tmp4 = triton_helpers.maximum(tmp1, tmp3) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, add_1, max_1], Original ATen: [aten.add, aten.maximum] stream0 = get_raw_stream(0) triton_poi_fused_add_maximum_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.jit import torch.onnx import torch.nn class SimpleMaxModule(torch.nn.Module): def __init__(self): super(SimpleMaxModule, self).__init__() def forward(self, a, b): return torch.max(a + a, b + b) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_maximum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp1 = tmp0 + tmp0 tmp3 = tmp2 + tmp2 tmp4 = triton_helpers.maximum(tmp1, tmp3) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_maximum_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class SimpleMaxModuleNew(torch.nn.Module): def __init__(self): super(SimpleMaxModuleNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
opti-mix/glow
SimpleMaxModule
false
7,396
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
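Because doubling is monotone, max(a + a, b + b) = 2 * max(a, b), so the fused add+maximum above is just a scaled elementwise maximum (a sketch; the equality is exact since multiplication by 2 is exact in floating point):

import torch

a, b = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
assert torch.equal(torch.max(a + a, b + b), 2 * torch.max(a, b))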
SimpleFloorModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/j6/cj6yrxveidc7xf7aw2sd5chrhx3pw3r5egsdoorepnl2ey2ejho7.py # Topologically Sorted Source Nodes: [c, floor], Original ATen: [aten.add, aten.floor] # Source node to ATen node mapping: # c => add # floor => floor # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %floor : [num_users=1] = call_function[target=torch.ops.aten.floor.default](args = (%add,), kwargs = {}) triton_poi_fused_add_floor_0 = async_compile.triton('triton_poi_fused_add_floor_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_floor_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_floor_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + 
(x0), xmask) tmp2 = tmp0 + tmp1 tmp3 = libdevice.floor(tmp2) tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [c, floor], Original ATen: [aten.add, aten.floor] stream0 = get_raw_stream(0) triton_poi_fused_add_floor_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.jit import torch.onnx import torch.nn class SimpleFloorModule(torch.nn.Module): def forward(self, a, b): c = a + b return torch.floor(c) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_floor_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 + tmp1 tmp3 = libdevice.floor(tmp2) tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_floor_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class SimpleFloorModuleNew(torch.nn.Module): def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
opti-mix/glow
SimpleFloorModule
false
7,397
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
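A subtlety of floor(a + b) above: it is not floor(a) + floor(b); the two differ by 1 whenever the fractional parts of a and b sum past 1 (a sketch):

import torch

a = torch.tensor([0.6, 0.3])
b = torch.tensor([0.7, 0.4])
print(torch.floor(a + b))               # tensor([1., 0.])
print(torch.floor(a) + torch.floor(b))  # tensor([0., 0.])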
SimpleConv2dModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ud/cudtupp4xbsxvl5czwt3p2pj3cknjnhtp6x45zymsucnyg3xzdnf.py # Topologically Sorted Source Nodes: [conv], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%arg1_1, %arg0_1, None, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask) tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/p7/cp7mgrng2aoot3kokspvn2sifs3rykgl5mktnpxnmb7yc57vcvab.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) # Topologically Sorted Source Nodes: [conv], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(arg1_1, buf0, 16, 16, grid=grid(16, 16), stream=stream0) del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) # Topologically Sorted Source Nodes: [conv], Original ATen: [aten.convolution] triton_poi_fused_convolution_0.run(arg0_1, buf1, 16, 16, grid=grid(16, 16), stream=stream0) del arg0_1 # Topologically Sorted Source Nodes: [conv], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 4, 
4)) del buf0 del buf1 buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf3, 16, grid=grid(16), stream=stream0) return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn.functional as F import torch.jit import torch.onnx import torch.nn class SimpleConv2dModule(torch.nn.Module): def __init__(self, stride=1, padding=0, dilation=1, groups=1): super(SimpleConv2dModule, self).__init__() self.stride = stride self.padding = padding self.dilation = dilation self.groups = groups def forward(self, inputs, filters, bias=None): conv = F.conv2d(inputs, filters, bias=bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups) return F.relu(conv) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask) tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 16)](arg1_1, buf0, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) triton_poi_fused_convolution_0[grid(16, 16)](arg0_1, buf1, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del arg0_1 buf2 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 4, 4)) del buf0 del buf1 buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf2 triton_poi_fused_relu_1[grid(16)](buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) return buf3, class SimpleConv2dModuleNew(torch.nn.Module): def __init__(self, stride=1, padding=0, dilation=1, groups=1): super(SimpleConv2dModuleNew, self).__init__() self.stride = stride self.padding = padding self.dilation = dilation self.groups = groups def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
opti-mix/glow
SimpleConv2dModule
false
7,398
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
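The (4, 4, 1, 1) result asserted in `call` above follows from the usual convolution shape rule H_out = floor((H_in + 2*pad - dilation*(k - 1) - 1) / stride) + 1 = floor((4 + 0 - 3 - 1) / 1) + 1 = 1. A quick eager confirmation (a sketch):

import torch
import torch.nn.functional as F

inputs = torch.rand(4, 4, 4, 4)   # N, C, H, W
filters = torch.rand(4, 4, 4, 4)  # out_channels, in_channels, kH, kW
out = F.relu(F.conv2d(inputs, filters))
assert out.shape == (4, 4, 1, 1)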
SimpleConvTranspose2dModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ud/cudtupp4xbsxvl5czwt3p2pj3cknjnhtp6x45zymsucnyg3xzdnf.py # Topologically Sorted Source Nodes: [convTranspose], Original ATen: [aten.convolution] # Source node to ATen node mapping: # convTranspose => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%arg1_1, %arg0_1, None, [1, 1], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex 
= xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = (yindex // 4) tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask) tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/kt/ckt6gbx6lb7bibkgo7yxy7qvikxdttfdrwxhv4n3kzjb445guwpa.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 64], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 49 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (196*y1)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + (x2 + (49*y3)), tmp2, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) # Topologically Sorted Source Nodes: [convTranspose], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(arg1_1, buf0, 16, 16, grid=grid(16, 16), stream=stream0) del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) # Topologically Sorted Source Nodes: [convTranspose], Original ATen: [aten.convolution] 
triton_poi_fused_convolution_0.run(arg0_1, buf1, 16, 16, grid=grid(16, 16), stream=stream0) del arg0_1 # Topologically Sorted Source Nodes: [convTranspose], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 7, 7), (196, 1, 28, 4)) del buf0 del buf1 buf3 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32) # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf2, buf3, 16, 49, grid=grid(16, 49), stream=stream0) del buf2 return (buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.jit
import torch.onnx
import torch.nn


class SimpleConvTranspose2dModule(torch.nn.Module):

    def __init__(self, stride=1, padding=0, output_padding=0, dilation=1,
                 groups=1):
        super(SimpleConvTranspose2dModule, self).__init__()
        self.stride = stride
        self.padding = padding
        self.output_padding = output_padding
        self.groups = groups
        self.dilation = dilation

    def forward(self, inputs, filters, bias=None):
        convTranspose = F.conv_transpose2d(inputs, filters, bias=bias,
            stride=self.stride, padding=self.padding,
            output_padding=self.output_padding, groups=self.groups,
            dilation=self.dilation)
        return F.relu(convTranspose)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
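A quick smoke test of the eager module above (an added sketch, not part of the original record; assumes the definitions above are in scope). With 4x4 inputs, 4x4 kernels, stride 1 and no padding, conv_transpose2d yields 7x7 maps, matching the (4, 4, 7, 7) buffer asserted in the compiled wrapper.

# Added illustration: (H_in - 1) * stride - 2 * padding + kernel = (4 - 1) * 1 - 0 + 4 = 7
m = SimpleConvTranspose2dModule()
inputs, filters = get_inputs()
out = m(inputs, filters)
assert out.shape == (4, 4, 7, 7)
assert (out >= 0).all()  # the trailing ReLU keeps activations non-negative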
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask) tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_relu_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 49 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 196 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + (x2 + 49 * y3), tmp2, xmask & ymask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 16)](arg1_1, buf0, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) triton_poi_fused_convolution_0[grid(16, 16)](arg0_1, buf1, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del arg0_1 buf2 = extern_kernels.convolution(buf0, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 7, 7), (196, 1, 28, 4)) del buf0 del buf1 buf3 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32) triton_poi_fused_relu_1[grid(16, 49)](buf2, buf3, 16, 49, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del buf2 return buf3, class SimpleConvTranspose2dModuleNew(torch.nn.Module): def __init__(self, stride=1, padding=0, output_padding=0, dilation=1, groups=1): super(SimpleConvTranspose2dModuleNew, self).__init__() self.stride = stride self.padding = padding self.output_padding = output_padding self.groups = groups self.dilation = dilation def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
opti-mix/glow
SimpleConvTranspose2dModule
false
7,399
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
SimpleCumSumModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/3c/c3cpu3wz3bo3cso4qi4ii34iyrr3niuqsvkxdjr6hbwpvju6c4oe.py # Topologically Sorted Source Nodes: [cumsum], Original ATen: [aten.cumsum] # Source node to ATen node mapping: # cumsum => cumsum # Graph fragment: # %cumsum : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%arg0_1, 4), kwargs = {}) triton_per_fused_cumsum_0 = async_compile.triton('triton_per_fused_cumsum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton_heuristics.persistent_reduction( size_hints=[256, 4], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cumsum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_cumsum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 256 rnumel = 4 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = 
tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + (4*x0)), xmask, other=0.0) tmp1 = tmp0.to(tl.float32) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp3, = tl.associative_scan((tmp2,), 1, _triton_helper_fn_add0) tl.store(out_ptr0 + (r1 + (4*x0)), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [cumsum], Original ATen: [aten.cumsum] stream0 = get_raw_stream(0) triton_per_fused_cumsum_0.run(arg0_1, buf0, 256, 4, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleCumSumModule(torch.nn.Module):

    def __init__(self, dim):
        super(SimpleCumSumModule, self).__init__()
        self.dim = dim

    def forward(self, tensor):
        return torch.cumsum(tensor, self.dim)


def get_inputs():
    return [torch.rand([4, 4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dim': 4}]
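A minimal sanity check (added sketch, assuming the definitions above are in scope): the module should agree elementwise with torch.cumsum along dim 4, the innermost axis of the 5-D input.

# Added illustration; not part of the original record.
m = SimpleCumSumModule(dim=4)
x, = get_inputs()
assert torch.allclose(m(x), torch.cumsum(x, dim=4))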
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton.jit def triton_per_fused_cumsum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl .constexpr): xnumel = 256 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0) tmp1 = tmp0.to(tl.float32) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp3, = tl.associative_scan((tmp2,), 1, _triton_helper_fn_add0) tl.store(out_ptr0 + (r1 + 4 * x0), tmp3, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_cumsum_0[grid(256)](arg0_1, buf0, 256, 4, XBLOCK= 32, num_warps=2, num_stages=1) del arg0_1 return buf0, class SimpleCumSumModuleNew(torch.nn.Module): def __init__(self, dim): super(SimpleCumSumModuleNew, self).__init__() self.dim = dim def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
opti-mix/glow
SimpleCumSumModule
false
7,400
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
SimpleLogModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/xc/cxcdfiik5xgk7a7m5pmyyveyug5a3vuczvqoixbtd5nchlbaz5qr.py # Topologically Sorted Source Nodes: [b, log_1], Original ATen: [aten.log] # Source node to ATen node mapping: # b => log # log_1 => log_1 # Graph fragment: # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg0_1,), kwargs = {}) # %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%log,), kwargs = {}) triton_poi_fused_log_0 = async_compile.triton('triton_poi_fused_log_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_log_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_log_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl_math.log(tmp0) tmp2 = tl_math.log(tmp1) tl.store(out_ptr0 + (x0), tmp2, xmask) ''', 
device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [b, log_1], Original ATen: [aten.log] stream0 = get_raw_stream(0) triton_poi_fused_log_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleLogModule(torch.nn.Module):

    def __init__(self, *dimensions):
        super(SimpleLogModule, self).__init__()

    def forward(self, a):
        b = torch.log(a)
        return torch.log(b)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
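One caveat worth flagging (an added observation, not from the source record): for the uniform [0, 1) inputs from get_inputs(), the inner log is negative, so the outer log is NaN elementwise; the fused kernel simply propagates that. Shifting the inputs above 1 gives finite results, as in this sketch (definitions above assumed in scope).

# Added illustration; inputs chosen so that log(x) > 0 and log(log(x)) is finite.
m = SimpleLogModule()
x = torch.rand([4, 4, 4, 4]) + 1.5
out = m(x)
assert torch.isfinite(out).all()
assert torch.allclose(out, torch.log(torch.log(x)))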
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_log_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl_math.log(tmp0) tmp2 = tl_math.log(tmp1) tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_log_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class SimpleLogModuleNew(torch.nn.Module): def __init__(self, *dimensions): super(SimpleLogModuleNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
opti-mix/glow
SimpleLogModule
false
7,401
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
SimpleFmodModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/iu/ciuedv4oj5epb6bq36sca4tmg3x777o3yghi7exu4n6otp7lp34w.py # Topologically Sorted Source Nodes: [c, fmod_1], Original ATen: [aten.fmod] # Source node to ATen node mapping: # c => fmod # fmod_1 => fmod_1 # Graph fragment: # %fmod : [num_users=1] = call_function[target=torch.ops.aten.fmod.Tensor](args = (%arg1_1, %arg0_1), kwargs = {}) # %fmod_1 : [num_users=1] = call_function[target=torch.ops.aten.fmod.Scalar](args = (%fmod, 1.0), kwargs = {}) triton_poi_fused_fmod_0 = async_compile.triton('triton_poi_fused_fmod_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_fmod_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_fmod_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp2 = 
libdevice.fmod(tmp0, tmp1) tmp3 = 1.0 tmp4 = libdevice.fmod(tmp2, tmp3) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [c, fmod_1], Original ATen: [aten.fmod] stream0 = get_raw_stream(0) triton_poi_fused_fmod_0.run(arg1_1, arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleFmodModule(torch.nn.Module):

    def __init__(self):
        super(SimpleFmodModule, self).__init__()

    def forward(self, a, b):
        if b.size() == torch.Size([]):
            c = a.fmod(b.item())
        else:
            c = a.fmod(b)
        return c.fmod(1.0)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
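A small usage sketch (added, not from the source record; assumes the definitions above are in scope). The divisor is shifted away from zero, since fmod by 0 yields NaN and torch.rand can in principle return 0.0.

# Added illustration of the tensor branch (b is not a 0-d tensor here).
m = SimpleFmodModule()
a = torch.rand([4, 4, 4, 4])
b = torch.rand([4, 4, 4, 4]) + 0.5  # keep the divisor strictly positive
out = m(a, b)
assert torch.allclose(out, torch.fmod(torch.fmod(a, b), 1.0))
assert (out.abs() < 1.0).all()  # |x fmod 1.0| < 1 wherever it is defined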
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_fmod_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = libdevice.fmod(tmp0, tmp1) tmp3 = 1.0 tmp4 = libdevice.fmod(tmp2, tmp3) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_fmod_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class SimpleFmodModuleNew(torch.nn.Module): def __init__(self): super(SimpleFmodModuleNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
opti-mix/glow
SimpleFmodModule
false
7,402
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
Foo
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/qp/cqp7ueqlgbxahbgap5at7kbdkd7h2xxgbchrupcpkji4bdn4nejg.py # Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu] # Source node to ATen node mapping: # x => convolution # x_1 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[131072], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 92256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3844) % 6 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x3), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/rv/crvxf6xnscpjmln6astlfsozus7rnqpbhexgiojxcqezrzslczlf.py # Topologically Sorted Source Nodes: [y], Original ATen: [aten.convolution] # Source node to ATen node mapping: # y => convolution_1 # Graph fragment: # %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[262144], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 230400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 3600) % 16 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (6, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (6, ), (1, )) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (16, 6, 3, 3), (54, 9, 3, 1)) assert_size_stride(primals_5, (16, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 6, 62, 62), (23064, 3844, 62, 1)) buf1 = buf0; del buf0 # reuse # 
Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 92256, grid=grid(92256), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [y], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 16, 60, 60), (57600, 3600, 60, 1)) buf3 = buf2; del buf2 # reuse # Topologically Sorted Source Nodes: [y], Original ATen: [aten.convolution] triton_poi_fused_convolution_1.run(buf3, primals_5, 230400, grid=grid(230400), stream=stream0) del primals_5 return (buf3, primals_1, primals_3, primals_4, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((6, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((16, 6, 3, 3), (54, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.jit
import torch.onnx
import torch.nn


class Foo(torch.nn.Module):

    def __init__(self):
        super(Foo, self).__init__()
        self.conv1 = torch.nn.Conv2d(3, 6, 3)
        self.relu = torch.nn.ReLU()
        self.conv2 = torch.nn.Conv2d(6, 16, 3)

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu(x)
        y = self.conv2(x)
        return y


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {}]
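A brief shape check for the two-layer stack above (added sketch; definitions above assumed in scope): each valid 3x3 convolution trims one pixel per border, 64 -> 62 -> 60, and the channel count follows conv2.

# Added illustration; not part of the original record.
m = Foo()
x, = get_inputs()
y = m(x)
assert y.shape == (4, 16, 60, 60)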
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 92256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3844 % 6 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 230400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3600 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (6, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (6,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (16, 6, 3, 3), (54, 9, 3, 1)) assert_size_stride(primals_5, (16,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 6, 62, 62), (23064, 3844, 62, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(92256)](buf1, primals_2, 92256, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 16, 60, 60), (57600, 3600, 60, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(230400)](buf3, primals_5, 230400, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1 class FooNew(torch.nn.Module): def __init__(self): super(FooNew, self).__init__() self.conv1 = torch.nn.Conv2d(3, 6, 3) self.relu = torch.nn.ReLU() self.conv2 = torch.nn.Conv2d(6, 16, 3) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
opti-mix/glow
Foo
false
7,403
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
SimpleNotModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/cd/ccdsblr3257brbmhrvhdeqget7q2gbkwwz35ze4s55arhkureuea.py # Topologically Sorted Source Nodes: [b, logical_not_1], Original ATen: [aten.logical_not] # Source node to ATen node mapping: # b => logical_not # logical_not_1 => logical_not_1 # Graph fragment: # %logical_not : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%arg0_1,), kwargs = {}) # %logical_not_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%logical_not,), kwargs = {}) triton_poi_fused_logical_not_0 = async_compile.triton('triton_poi_fused_logical_not_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_logical_not_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_logical_not_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 
(x0), xmask) tmp1 = (tmp0 != 0) tmp2 = tmp1 == 0 tmp3 = tmp2 == 0 tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [b, logical_not_1], Original ATen: [aten.logical_not] stream0 = get_raw_stream(0) triton_poi_fused_logical_not_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleNotModule(torch.nn.Module):

    def __init__(self):
        super(SimpleNotModule, self).__init__()

    def forward(self, a):
        b = torch.logical_not(a)
        return torch.logical_not(b)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
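A double logical_not is just a truthiness cast (added sketch, definitions above assumed in scope): the result is a bool tensor marking the nonzero elements.

# Added illustration; not part of the original record.
m = SimpleNotModule()
x, = get_inputs()
out = m(x)
assert out.dtype == torch.bool
assert torch.equal(out, x != 0)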
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_logical_not_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 != 0 tmp2 = tmp1 == 0 tmp3 = tmp2 == 0 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_logical_not_0[grid(256)](arg0_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 return buf0, class SimpleNotModuleNew(torch.nn.Module): def __init__(self): super(SimpleNotModuleNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
opti-mix/glow
SimpleNotModule
false
7,404
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
SimpleMatmulModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ng/cnggtmai2hzxc7e5creviqseyyf7qiy5pfpdjlp2pomqsserjuzj.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %arg1_1), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0 + tmp0 tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 
4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg1_1 buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out=buf1) del arg0_1 del buf0 return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleMatmulModule(torch.nn.Module):

    def __init__(self):
        super(SimpleMatmulModule, self).__init__()

    def forward(self, a, b):
        return a.matmul(b + b)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
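With two (4, 4, 4, 4) operands, matmul batches over the two leading dimensions, which is why the compiled wrapper reshapes to (16, 4, 4) and calls bmm. A quick equivalence check (added sketch, definitions above in scope):

# Added illustration; b + b equals 2 * b exactly in floating point.
m = SimpleMatmulModule()
a, b = get_inputs()
out = m(a, b)
assert out.shape == (4, 4, 4, 4)
assert torch.allclose(out, torch.matmul(a, 2 * b))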
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 + tmp0 tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](arg1_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg1_1 buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out =buf1) del arg0_1 del buf0 return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), class SimpleMatmulModuleNew(torch.nn.Module): def __init__(self): super(SimpleMatmulModuleNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
opti-mix/glow
SimpleMatmulModule
false
7,405
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
SimpleLinearModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ph/cphyvqksaznjc5f5gstivhj5vszkuncctuzvaegazln3taw555sz.py # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] # Source node to ATen node mapping: # add => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg0_1), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0 + tmp0 tl.store(out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), 
(4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [add], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, linear], Original ATen: [aten.add, aten.mm] extern_kernels.mm(buf0, reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf1) del arg1_1 del buf0 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.jit
import torch.onnx
import torch.nn


class SimpleLinearModule(torch.nn.Module):

    def __init__(self):
        super(SimpleLinearModule, self).__init__()

    def forward(self, input, weight, bias=None):
        return F.linear(input + input, weight, bias)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
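F.linear computes input @ weight.T (+ bias), so doubling the input doubles the product; the compiled wrapper realizes the same thing as a fused add followed by an mm against the transposed weight. A check (added sketch, definitions above in scope):

# Added illustration; not part of the original record.
m = SimpleLinearModule()
x, w = get_inputs()
out = m(x, w)
assert out.shape == (4, 4)
assert torch.allclose(out, 2 * (x @ w.t()))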
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 + tmp0 tl.store(out_ptr0 + x0, tmp1, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf1) del arg1_1 del buf0 return buf1, class SimpleLinearModuleNew(torch.nn.Module): def __init__(self): super(SimpleLinearModuleNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
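A minimal smoke test for this entry, not part of the source: it assumes the eager module, its get_inputs helper, and the cleaned SimpleLinearModuleNew wrapper above are importable from one module, and that a CUDA device is available.

import torch
import torch.nn.functional as F

# Hypothetical check: the compiled wrapper should match eager F.linear(x + x, w).
inp, weight = [t.cuda() for t in get_inputs()]
out = SimpleLinearModuleNew()(inp, weight)
torch.testing.assert_close(out, F.linear(inp + inp, weight))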
opti-mix/glow
SimpleLinearModule
false
7406
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
SimpleOrModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/el/cel6bpplfk6uwqmcfj3bzthez5xrkrefn3chlle2rnmbe23xgscv.py # Topologically Sorted Source Nodes: [c, logical_or_1], Original ATen: [aten.logical_or] # Source node to ATen node mapping: # c => logical_or # logical_or_1 => logical_or_1 # Graph fragment: # %logical_or : [num_users=1] = call_function[target=torch.ops.aten.logical_or.default](args = (%arg1_1, %arg0_1), kwargs = {}) # %logical_or_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_or.default](args = (%logical_or, %logical_or), kwargs = {}) triton_poi_fused_logical_or_0 = async_compile.triton('triton_poi_fused_logical_or_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_logical_or_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_logical_or_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = 
xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp1 = (tmp0 != 0) tmp3 = (tmp2 != 0) tmp4 = tmp1 | tmp3 tmp5 = tmp4 | tmp4 tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [c, logical_or_1], Original ATen: [aten.logical_or] stream0 = get_raw_stream(0) triton_poi_fused_logical_or_0.run(arg1_1, arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleOrModule(torch.nn.Module):

    def __init__(self):
        super(SimpleOrModule, self).__init__()

    def forward(self, a, b):
        c = torch.logical_or(a, b)
        return torch.logical_or(c, c)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_logical_or_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp1 = tmp0 != 0 tmp3 = tmp2 != 0 tmp4 = tmp1 | tmp3 tmp5 = tmp4 | tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_logical_or_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class SimpleOrModuleNew(torch.nn.Module): def __init__(self): super(SimpleOrModuleNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
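A small sanity check, not from the source: the fused kernel treats the float inputs as booleans via != 0 and writes a torch.bool buffer, so it should agree with eager torch.logical_or. Assumes a CUDA device and that the definitions above share a module.

import torch

a, b = [t.cuda() for t in get_inputs()]
out = SimpleOrModuleNew()(a, b)
c = torch.logical_or(a, b)
# The second logical_or is the identity on a bool tensor, mirroring tmp4 | tmp4.
assert out.dtype == torch.bool
assert torch.equal(out, torch.logical_or(c, c))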
opti-mix/glow
SimpleOrModule
false
7407
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
SimpleMulModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/us/cusp54q34go4owlw52vntqwnfud2kj3mzyijjppr27wa7benuz7r.py # Topologically Sorted Source Nodes: [other, mul_1], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # other => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %mul), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp2 = tmp0 * 
tmp1 tmp3 = tmp2 * tmp2 tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [other, mul_1], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleMulModule(torch.nn.Module):

    def __init__(self):
        super(SimpleMulModule, self).__init__()

    def forward(self, left, right):
        other = left.mul(right.item() if right.size() == torch.Size([]) else right)
        return other.mul(other)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp2 = tmp0 * tmp1 tmp3 = tmp2 * tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class SimpleMulModuleNew(torch.nn.Module): def __init__(self): super(SimpleMulModuleNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
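Note that the traced graph specializes to the tensor branch (right is 4-d here, so right.item() is never taken), reducing the module to (left * right) squared in one fused kernel. A hypothetical check under the same assumptions as above (CUDA device, definitions in scope):

import torch

left, right = [t.cuda() for t in get_inputs()]
out = SimpleMulModuleNew()(left, right)
# tmp2 = a * b, tmp3 = tmp2 * tmp2 in the kernel above.
torch.testing.assert_close(out, (left * right) * (left * right))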
opti-mix/glow
SimpleMulModule
false
7408
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
SimpleMinModule
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/is/cisvhtrbgx7geszywb4tws4xgixibtohmtf6243rcq7k7rvlgexs.py # Topologically Sorted Source Nodes: [add, add_1, min_1], Original ATen: [aten.add, aten.minimum] # Source node to ATen node mapping: # add => add # add_1 => add_1 # min_1 => minimum # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg0_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %arg1_1), kwargs = {}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%add, %add_1), kwargs = {}) triton_poi_fused_add_minimum_0 = async_compile.triton('triton_poi_fused_add_minimum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_minimum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_minimum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp1 = tmp0 + tmp0 tmp3 = tmp2 + tmp2 tmp4 = triton_helpers.minimum(tmp1, tmp3) tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, add_1, min_1], Original ATen: [aten.add, aten.minimum] stream0 = get_raw_stream(0) triton_poi_fused_add_minimum_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleMinModule(torch.nn.Module):

    def __init__(self):
        super(SimpleMinModule, self).__init__()

    def forward(self, a, b):
        return torch.min(a + a, b + b)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_minimum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp1 = tmp0 + tmp0 tmp3 = tmp2 + tmp2 tmp4 = triton_helpers.minimum(tmp1, tmp3) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_minimum_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class SimpleMinModuleNew(torch.nn.Module): def __init__(self): super(SimpleMinModuleNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
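The fused kernel computes both doublings and the elementwise minimum in a single pass. A minimal correctness sketch (assumes a CUDA device; all names come from the two code fields above):

import torch

a, b = [t.cuda() for t in get_inputs()]
out = SimpleMinModuleNew()(a, b)
torch.testing.assert_close(out, torch.min(a + a, b + b))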
opti-mix/glow
SimpleMinModule
false
7409
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
SimpleReciprocalModel
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/mb/cmb22rikzfzsbog4swdz4yuwqyxobyyc7jycz5a5bbxu4fvunisq.py # Topologically Sorted Source Nodes: [other, reciprocal], Original ATen: [aten.add, aten.reciprocal] # Source node to ATen node mapping: # other => add # reciprocal => reciprocal # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg0_1), kwargs = {}) # %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add,), kwargs = {}) triton_poi_fused_add_reciprocal_0 = async_compile.triton('triton_poi_fused_add_reciprocal_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_reciprocal_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), 
xmask) tmp1 = tmp0 + tmp0 tmp2 = tl.full([1], 1, tl.int32) tmp3 = tmp2 / tmp1 tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [other, reciprocal], Original ATen: [aten.add, aten.reciprocal] stream0 = get_raw_stream(0) triton_poi_fused_add_reciprocal_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleReciprocalModel(torch.nn.Module):

    def __init__(self, inplace=False):
        super(SimpleReciprocalModel, self).__init__()
        self.inplace = inplace

    def forward(self, tensor):
        other = tensor + tensor
        return other.reciprocal_() if self.inplace else torch.reciprocal(other)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 + tmp0 tmp2 = tl.full([1], 1, tl.int32) tmp3 = tmp2 / tmp1 tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_reciprocal_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class SimpleReciprocalModelNew(torch.nn.Module): def __init__(self, inplace=False): super(SimpleReciprocalModelNew, self).__init__() self.inplace = inplace def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
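Once the graph is functionalized the inplace flag no longer matters: the kernel always materializes 1 / (x + x) into a fresh buffer (the int32 constant one promotes to fp32 in the division). A hedged usage sketch, assuming a CUDA device:

import torch

x, = [t.cuda() for t in get_inputs()]
out = SimpleReciprocalModelNew()(x)
torch.testing.assert_close(out, torch.reciprocal(x + x))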
opti-mix/glow
SimpleReciprocalModel
false
7410
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
SimpleReluModel
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/ll/cll4cjpzu6vsmy2t6yfmvdki2efgptzlcycbgoibjk65mj7ireqn.py # Topologically Sorted Source Nodes: [other, relu_1], Original ATen: [aten.relu] # Source node to ATen node mapping: # other => relu # relu_1 => relu_1 # Graph fragment: # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {}) # %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%relu,), kwargs = {}) triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) 
tmp3 = triton_helpers.maximum(tmp1, tmp2) tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [other, relu_1], Original ATen: [aten.relu] stream0 = get_raw_stream(0) triton_poi_fused_relu_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.jit
import torch.onnx
import torch.nn


class SimpleReluModel(torch.nn.Module):

    def __init__(self, inplace=False):
        super(SimpleReluModel, self).__init__()
        self.inplace = inplace

    def forward(self, tensor):
        other = F.relu(tensor, inplace=self.inplace)
        return F.relu(other, inplace=self.inplace)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = triton_helpers.maximum(tmp1, tmp2) tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class SimpleReluModelNew(torch.nn.Module): def __init__(self, inplace=False): super(SimpleReluModelNew, self).__init__() self.inplace = inplace def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
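Since relu is idempotent, the second maximum in the fused kernel is redundant but harmless; Inductor fuses both into one elementwise pass rather than eliding it. A minimal smoke test under the usual assumptions (CUDA device, definitions in scope):

import torch
import torch.nn.functional as F

x, = [t.cuda() for t in get_inputs()]
out = SimpleReluModelNew()(x)
torch.testing.assert_close(out, F.relu(F.relu(x)))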
opti-mix/glow
SimpleReluModel
false
7411
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
SimpleTypeasModel
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/oo/coo6fior4gtmedodyysb4cm6xgshrmes4qlz3fctq5hnz2fimegz.py # Topologically Sorted Source Nodes: [tensor, add_1], Original ATen: [aten.add] # Source node to ATen node mapping: # add_1 => add_1 # tensor => add # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg0_1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %add), kwargs = {}) triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tmp0 + tmp0 tmp2 = tmp1 + tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) 
''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [tensor, add_1], Original ATen: [aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleTypeasModel(torch.nn.Module):

    def __init__(self):
        super(SimpleTypeasModel, self).__init__()

    def forward(self, tensor, other=None):
        other = tensor if other is None else other
        if tensor.dtype != torch.bool:
            tensor = tensor + tensor
        typed = tensor.type_as(other)
        return typed + typed


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tmp0 + tmp0 tmp2 = tmp1 + tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class SimpleTypeasModelNew(torch.nn.Module): def __init__(self): super(SimpleTypeasModelNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
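With the default other=None, type_as is a no-op and the traced graph folds to (x + x) + (x + x), i.e. 4 * x, which is exactly what the fused add kernel computes. A hypothetical check, assuming a CUDA device:

import torch

x, = [t.cuda() for t in get_inputs()]
out = SimpleTypeasModelNew()(x)
torch.testing.assert_close(out, 4 * x)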
opti-mix/glow
SimpleTypeasModel
false
7412
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
SimpleLogSoftmaxModel
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/xe/cxeq77gbpevhf6jov7fs3c25pvswzi43xn2bxfthg2nvsuurswra.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => amax, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [4], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {}) triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = 
tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/nj/cnjj3kjcokm5rrbv6azeg2i2dkelsepqzurxngdhwjbc5vp6wfpj.py # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_softmax => exp, log, sub_1, sum_1 # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [4], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + (x2), tmp13, xmask) ''', 
device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] stream0 = get_raw_stream(0) triton_poi_fused__log_softmax_0.run(arg0_1, buf0, 1024, grid=grid(1024), stream=stream0) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_1.run(buf0, buf1, 1024, grid=grid(1024), stream=stream0) del buf0 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.jit
import torch.onnx
import torch.nn


class SimpleLogSoftmaxModel(torch.nn.Module):

    def __init__(self, dimension):
        super(SimpleLogSoftmaxModel, self).__init__()
        self.dimension = dimension

    def forward(self, tensor):
        return F.log_softmax(tensor, self.dimension)


def get_inputs():
    return [torch.rand([4, 4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dimension': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.jit import torch.onnx import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused__log_softmax_1[grid(1024)](buf0, buf1, 1024, XBLOCK=128, num_warps=4, num_stages=1) del buf0 return buf1, class SimpleLogSoftmaxModelNew(torch.nn.Module): def __init__(self, dimension): super(SimpleLogSoftmaxModelNew, self).__init__() self.dimension = dimension def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
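The two kernels implement the numerically stable formulation log_softmax(x) = (x - max(x)) - log(sum(exp(x - max(x)))) along the last dimension: the first subtracts the rowwise max, the second subtracts the log-sum-exp. A sketch comparing against eager F.log_softmax (assumes a CUDA device):

import torch
import torch.nn.functional as F

x, = [t.cuda() for t in get_inputs()]
out = SimpleLogSoftmaxModelNew(4)(x)
torch.testing.assert_close(out, F.log_softmax(x, dim=4))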
opti-mix/glow
SimpleLogSoftmaxModel
false
7413
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
SimpleReshapeModel
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_4/inductor_cache/2j/c2ju6nxawlrauguem6bximg5swv34phnnk2hdvtnx2xovvh5goee.py
# Topologically Sorted Source Nodes: [combined], Original ATen: [aten.add]
# Source node to ATen node mapping:
#   combined => add
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg0_1), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = tmp0 + tmp0
    tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
        # Topologically Sorted Source Nodes: [combined], Original ATen: [aten.add]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_0.run(arg0_1, buf0, 4, grid=grid(4), stream=stream0)
        del arg0_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleReshapeModel(torch.nn.Module):

    def __init__(self, shape):
        super(SimpleReshapeModel, self).__init__()
        self.shape = shape

    def forward(self, tensor):
        combined = tensor + tensor
        return combined.reshape(self.shape)


def get_inputs():
    return [torch.rand([4])]


def get_init_inputs():
    return [[], {'shape': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 + tmp0
    tl.store(out_ptr0 + x0, tmp1, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4,), (1,), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(4)](arg0_1, buf0, 4, XBLOCK=4,
            num_warps=1, num_stages=1)
        del arg0_1
    return buf0,


class SimpleReshapeModelNew(torch.nn.Module):

    def __init__(self, shape):
        super(SimpleReshapeModelNew, self).__init__()
        self.shape = shape

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
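A minimal sketch of how the eager module and the compiled wrapper above can be cross-checked, assuming both class definitions are in scope on a machine with a CUDA device (the helper name is hypothetical, not part of the dataset):

import torch

def _check_simple_reshape():
    # Both paths compute tensor + tensor and yield a (4,) result, so the
    # compiled wrapper should match the eager module.
    x = torch.rand([4], device='cuda')
    torch.testing.assert_close(SimpleReshapeModelNew(shape=4)(x),
                               SimpleReshapeModel(shape=4)(x))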
opti-mix/glow
SimpleReshapeModel
false
7,414
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
SimpleSumModule
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_4/inductor_cache/rt/crtaa7jk7byqntpikbqibiqclpj3ph3k3g4gm3y22pcpf5wsrsn3.py
# Topologically Sorted Source Nodes: [b, sum_1], Original ATen: [aten.add, aten.sum]
# Source node to ATen node mapping:
#   b => add
#   sum_1 => sum_1
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg0_1), kwargs = {})
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%add,), kwargs = {})
triton_per_fused_add_sum_0 = async_compile.triton('triton_per_fused_add_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.persistent_reduction(
    size_hints=[1, 256],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_sum_0(in_ptr0, out_ptr0, xnumel, rnumel):
    xnumel = 1
    XBLOCK: tl.constexpr = 1
    rnumel = 256
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    xmask = tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    roffset = 0
    rmask = tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (r0), None)
    tmp1 = tmp0 + tmp0
    tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
    tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0))
    tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp4, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        # Topologically Sorted Source Nodes: [b, sum_1], Original ATen: [aten.add, aten.sum]
        stream0 = get_raw_stream(0)
        triton_per_fused_add_sum_0.run(arg0_1, buf0, 1, 256, grid=grid(1), stream=stream0)
        del arg0_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleSumModule(torch.nn.Module):

    def __init__(self, dtype=None):
        super(SimpleSumModule, self).__init__()
        self.dtype = dtype

    def forward(self, a):
        b = a + a
        return torch.sum(b, dtype=self.dtype)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_sum_0(in_ptr0, out_ptr0, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tmp0 + tmp0
    tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
    tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0))
    tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp4, None)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        get_raw_stream(0)
        triton_per_fused_add_sum_0[grid(1)](arg0_1, buf0, 1, 256,
            num_warps=2, num_stages=1)
        del arg0_1
    return buf0,


class SimpleSumModuleNew(torch.nn.Module):

    def __init__(self, dtype=None):
        super(SimpleSumModuleNew, self).__init__()
        self.dtype = dtype

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
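Note that `triton_per_fused_add_sum_0` is a persistent reduction: a single program (XBLOCK = 1, RBLOCK = 256) loads all 4*4*4*4 = 256 elements and reduces them in one pass, so no cross-block accumulation is required. A minimal cross-check, assuming the classes above are in scope and a CUDA device is available (the helper name is hypothetical):

import torch

def _check_simple_sum():
    a = torch.rand([4, 4, 4, 4], device='cuda')
    # Scalar output: the sum of a + a over all 256 elements.
    torch.testing.assert_close(SimpleSumModuleNew()(a), torch.sum(a + a))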
opti-mix/glow
SimpleSumModule
false
7,415
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
SimpleXorModule
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_4/inductor_cache/2t/c2t7awkb47lv4n57sfx4u7jomyzx3q7qvtzhxrgvtwk3vcwxkfhp.py
# Topologically Sorted Source Nodes: [c, logical_xor_1], Original ATen: [aten.logical_xor]
# Source node to ATen node mapping:
#   c => logical_xor
#   logical_xor_1 => logical_xor_1
# Graph fragment:
#   %logical_xor : [num_users=1] = call_function[target=torch.ops.aten.logical_xor.default](args = (%arg1_1, %arg0_1), kwargs = {})
#   %logical_xor_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_xor.default](args = (%logical_xor, %logical_xor), kwargs = {})
triton_poi_fused_logical_xor_0 = async_compile.triton('triton_poi_fused_logical_xor_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_logical_xor_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_logical_xor_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp2 = tl.load(in_ptr1 + (x0), xmask)
    tmp1 = (tmp0 != 0)
    tmp3 = (tmp2 != 0)
    tmp4 = (tmp1 ^ tmp3)
    tmp5 = (tmp4 ^ tmp4)
    tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        # Topologically Sorted Source Nodes: [c, logical_xor_1], Original ATen: [aten.logical_xor]
        stream0 = get_raw_stream(0)
        triton_poi_fused_logical_xor_0.run(arg1_1, arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleXorModule(torch.nn.Module):

    def __init__(self):
        super(SimpleXorModule, self).__init__()

    def forward(self, a, b):
        c = torch.logical_xor(a, b)
        return torch.logical_xor(c, c)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_logical_xor_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask)
    tmp1 = tmp0 != 0
    tmp3 = tmp2 != 0
    tmp4 = tmp1 ^ tmp3
    tmp5 = tmp4 ^ tmp4
    tl.store(out_ptr0 + x0, tmp5, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_logical_xor_0[grid(256)](arg1_1, arg0_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class SimpleXorModuleNew(torch.nn.Module):

    def __init__(self):
        super(SimpleXorModuleNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
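Since `logical_xor(c, c)` is identically False, the fused `tmp4 ^ tmp4` makes the kernel's output all-False regardless of the inputs. A minimal sketch of a check, assuming the class above is in scope and a CUDA device is available (the helper name is hypothetical):

import torch

def _check_simple_xor():
    a = torch.rand([4, 4, 4, 4], device='cuda')
    b = torch.rand([4, 4, 4, 4], device='cuda')
    out = SimpleXorModuleNew()(a, b)
    # x ^ x == False for every element, so the result is all zeros.
    assert out.dtype == torch.bool and not out.any()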
opti-mix/glow
SimpleXorModule
false
7,416
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
SimplePowModule
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_4/inductor_cache/th/cthfcnqcrhqllimklewhtwlw3kynsejtajhc7eunrob3gyhbmsze.py
# Topologically Sorted Source Nodes: [pow_1], Original ATen: [aten.pow]
# Source node to ATen node mapping:
#   pow_1 => pow_1
# Graph fragment:
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 4), kwargs = {})
triton_poi_fused_pow_0 = async_compile.triton('triton_poi_fused_pow_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = tmp0 * tmp0
    tmp2 = tmp1 * tmp1
    tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [pow_1], Original ATen: [aten.pow]
        stream0 = get_raw_stream(0)
        triton_poi_fused_pow_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimplePowModule(torch.nn.Module):

    def __init__(self, power):
        super(SimplePowModule, self).__init__()
        self.power = power

    def forward(self, tensor):
        return torch.pow(tensor, self.power)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'power': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 * tmp0
    tmp2 = tmp1 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_pow_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SimplePowModuleNew(torch.nn.Module):

    def __init__(self, power):
        super(SimplePowModuleNew, self).__init__()
        self.power = power

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
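Inductor strength-reduces `pow(x, 4)` into two squarings (`tmp1 = x*x`, then `tmp2 = tmp1*tmp1`) instead of calling a pow routine. A minimal sketch of a check, assuming the classes above are in scope and CUDA is available (the helper name is hypothetical):

import torch

def _check_simple_pow():
    x = torch.rand([4, 4, 4, 4], device='cuda')
    # (x*x)*(x*x) matches torch.pow(x, 4) to within float32 tolerance.
    torch.testing.assert_close(SimplePowModuleNew(power=4)(x),
                               torch.pow(x, 4))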
opti-mix/glow
SimplePowModule
false
7,417
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
SimpleSinModule
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_4/inductor_cache/um/cumfo6xlwcf7gajmq7aavjn4b3q6favglcetyv2n73qurfxutgrk.py
# Topologically Sorted Source Nodes: [add, sin], Original ATen: [aten.add, aten.sin]
# Source node to ATen node mapping:
#   add => add
#   sin => sin
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg0_1), kwargs = {})
#   %sin : [num_users=1] = call_function[target=torch.ops.aten.sin.default](args = (%add,), kwargs = {})
triton_poi_fused_add_sin_0 = async_compile.triton('triton_poi_fused_add_sin_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sin_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_sin_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = tmp0 + tmp0
    tmp2 = tl_math.sin(tmp1)
    tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [add, sin], Original ATen: [aten.add, aten.sin]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_sin_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleSinModule(torch.nn.Module):

    def __init__(self):
        super(SimpleSinModule, self).__init__()

    def forward(self, a):
        return torch.sin(a + a)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_sin_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 + tmp0
    tmp2 = tl_math.sin(tmp1)
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_sin_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SimpleSinModuleNew(torch.nn.Module):

    def __init__(self):
        super(SimpleSinModuleNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
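A minimal cross-check of the fused add-then-sin kernel, assuming the class above is in scope on a CUDA machine (the helper name is hypothetical):

import torch

def _check_simple_sin():
    x = torch.rand([4, 4, 4, 4], device='cuda')
    # The fused kernel computes sin(x + x) in a single pass.
    torch.testing.assert_close(SimpleSinModuleNew()(x), torch.sin(x + x))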
opti-mix/glow
SimpleSinModule
false
7,418
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
SimpleStackModel
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_4/inductor_cache/r2/cr2ft4qgpkoihvmquzfvb2qj2z7n5lsy5sfodvtjb5ijcg3hxzff.py
# Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
#   stack => cat
# Graph fragment:
#   %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze, %unsqueeze_1], 4), kwargs = {})
triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[512],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2
    x1 = (xindex // 2)
    x2 = xindex
    tmp0 = x0
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 2, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tl.load(in_ptr1 + (x1), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
    tmp10 = tmp9 + tmp9
    tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
    tmp12 = tl.where(tmp6, tmp10, tmp11)
    tmp13 = tl.where(tmp4, tmp5, tmp12)
    tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4, 2), (128, 32, 8, 2, 1), torch.float32)
        # Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack]
        stream0 = get_raw_stream(0)
        triton_poi_fused_stack_0.run(arg1_1, arg0_1, buf0, 512, grid=grid(512), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleStackModel(torch.nn.Module):

    def __init__(self, dim):
        super(SimpleStackModel, self).__init__()
        self.dim = dim

    def forward(self, a, b):
        c = b + b
        return torch.stack((a, c), dim=self.dim)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dim': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_stack_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2
    x1 = xindex // 2
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 2, tl.int64)
    tmp9 = tl.load(in_ptr1 + x1, tmp6 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp10 = tmp9 + tmp9
    tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
    tmp12 = tl.where(tmp6, tmp10, tmp11)
    tmp13 = tl.where(tmp4, tmp5, tmp12)
    tl.store(out_ptr0 + x2, tmp13, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4, 2), (128, 32, 8, 2, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_stack_0[grid(512)](arg1_1, arg0_1, buf0, 512,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
    return buf0,


class SimpleStackModelNew(torch.nn.Module):

    def __init__(self, dim):
        super(SimpleStackModelNew, self).__init__()
        self.dim = dim

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
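The stack kernel materializes the new innermost dimension directly: for each output index, `x0 = xindex % 2` selects the stacked slot and `x1 = xindex // 2` addresses the source element, so the two operands are written interleaved into the (4, 4, 4, 4, 2) buffer. A minimal cross-check against the eager module, assuming both classes above are in scope and CUDA is available (the helper name is hypothetical):

import torch

def _check_simple_stack():
    a = torch.rand([4, 4, 4, 4], device='cuda')
    b = torch.rand([4, 4, 4, 4], device='cuda')
    torch.testing.assert_close(SimpleStackModelNew(dim=4)(a, b),
                               SimpleStackModel(dim=4)(a, b))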
opti-mix/glow
SimpleStackModel
false
7,419
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
SimpleTanhModel
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_4/inductor_cache/bv/cbvw7afmr2daqq3xv6rtsmbu5jcnt46e5lw3xedxgr4bybwozsyg.py
# Topologically Sorted Source Nodes: [tensor, tanh], Original ATen: [aten.add, aten.tanh]
# Source node to ATen node mapping:
#   tanh => tanh
#   tensor => add
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg0_1), kwargs = {})
#   %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {})
triton_poi_fused_add_tanh_0 = async_compile.triton('triton_poi_fused_add_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), xmask)
    tmp1 = tmp0 + tmp0
    tmp2 = libdevice.tanh(tmp1)
    tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [tensor, tanh], Original ATen: [aten.add, aten.tanh]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_tanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
        del arg0_1
    return (buf0, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.jit
import torch.onnx
import torch.nn


class SimpleTanhModel(torch.nn.Module):

    def __init__(self, inplace=False):
        super(SimpleTanhModel, self).__init__()
        self.inplace = inplace

    def forward(self, tensor):
        tensor = tensor + tensor
        return tensor.tanh_() if self.inplace else tensor.tanh()


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tmp0 + tmp0
    tmp2 = libdevice.tanh(tmp1)
    tl.store(out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_tanh_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SimpleTanhModelNew(torch.nn.Module):

    def __init__(self, inplace=False):
        super(SimpleTanhModelNew, self).__init__()
        self.inplace = inplace

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
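A minimal cross-check of the fused add-then-tanh kernel, assuming the class above is in scope on a CUDA machine (the helper name is hypothetical):

import torch

def _check_simple_tanh():
    x = torch.rand([4, 4, 4, 4], device='cuda')
    # Out-of-place and in-place variants both reduce to tanh(x + x).
    torch.testing.assert_close(SimpleTanhModelNew()(x), torch.tanh(x + x))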
opti-mix/glow
SimpleTanhModel
false
7,420
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
SimpleSoftmaxModel
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_4/inductor_cache/ef/cef5jl2dffibrzdgvry2syqh3nv4y45hqkgzbp7rs7to3eijjxsa.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   softmax => amax, exp, sub
# Graph fragment:
#   %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [4], True), kwargs = {})
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
#   %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1024],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/wx/cwx2kruo4gzyioj66hb76yw4vgc4lxjk7wwvv5hwx3fp7vkj4o6n.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
#   softmax => div, sum_1
# Graph fragment:
#   %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [4], True), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1024],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
        stream0 = get_raw_stream(0)
        triton_poi_fused__softmax_0.run(arg0_1, buf0, 1024, grid=grid(1024), stream=stream0)
        del arg0_1
        buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
        triton_poi_fused__softmax_1.run(buf0, buf1, 1024, grid=grid(1024), stream=stream0)
        del buf0
    return (buf1, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn.functional as F
import torch.jit
import torch.onnx
import torch.nn


class SimpleSoftmaxModel(torch.nn.Module):

    def __init__(self, dimension):
        super(SimpleSoftmaxModel, self).__init__()
        self.dimension = dimension

    def forward(self, tensor):
        return F.softmax(tensor, self.dimension)


def get_inputs():
    return [torch.rand([4, 4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dimension': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.jit
import torch.onnx
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(1024)](arg0_1, buf0, 1024,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
            torch.float32)
        triton_poi_fused__softmax_1[grid(1024)](buf0, buf1, 1024,
            XBLOCK=256, num_warps=4, num_stages=1)
        del buf0
    return buf1,


class SimpleSoftmaxModelNew(torch.nn.Module):

    def __init__(self, dimension):
        super(SimpleSoftmaxModelNew, self).__init__()
        self.dimension = dimension

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
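The softmax is split into the classic two passes: kernel 0 subtracts the row maximum before exponentiating (for numerical stability), and kernel 1 normalizes by the row sum; because the softmax dimension has size 4, each row reduction is unrolled into four scalar loads instead of a loop. A minimal cross-check, assuming both classes above are in scope on a CUDA machine (the helper name is hypothetical):

import torch

def _check_simple_softmax():
    x = torch.rand([4, 4, 4, 4, 4], device='cuda')
    torch.testing.assert_close(SimpleSoftmaxModelNew(dimension=4)(x),
                               SimpleSoftmaxModel(dimension=4)(x))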
opti-mix/glow
SimpleSoftmaxModel
false
7,421
[ "Apache-2.0" ]
1
4ba074df5da9822986a23a6679ab592c22660f6d
https://github.com/opti-mix/glow/tree/4ba074df5da9822986a23a6679ab592c22660f6d
HardKumaBinarizer
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_4/inductor_cache/rs/crsqcexxk6yerceqnf6qa4jqmipzzsvegtsy5g4honvx4bpify5w.py
# Topologically Sorted Source Nodes: [sub, exp_1, add, truediv, pow_1, sub_1, exp, add_1, truediv_1, k, mul, t, t_1], Original ATen: [aten.rsub, aten.exp, aten.add, aten.reciprocal, aten.mul, aten.pow, aten.hardtanh]
# Source node to ATen node mapping:
#   add => add
#   add_1 => add_1
#   exp => exp
#   exp_1 => exp_1
#   k => pow_2
#   mul => mul_2
#   pow_1 => pow_1
#   sub => sub
#   sub_1 => sub_1
#   t => add_2
#   t_1 => clamp_max, clamp_min
#   truediv => mul, reciprocal
#   truediv_1 => mul_1, reciprocal_1
# Graph fragment:
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %rand), kwargs = {})
#   %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg1_1,), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_1, 1e-08), kwargs = {})
#   %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add,), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1), kwargs = {})
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%sub, %mul), kwargs = {})
#   %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %pow_1), kwargs = {})
#   %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {})
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp, 1e-08), kwargs = {})
#   %reciprocal_1 : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add_1,), kwargs = {})
#   %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal_1, 1), kwargs = {})
#   %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%sub_1, %mul_1), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, 1.2000000000000002), kwargs = {})
#   %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, -0.1), kwargs = {})
#   %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_2, 0), kwargs = {})
#   %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1), kwargs = {})
triton_poi_fused_add_exp_hardtanh_mul_pow_reciprocal_rsub_0 = async_compile.triton('triton_poi_fused_add_exp_hardtanh_mul_pow_reciprocal_rsub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_hardtanh_mul_pow_reciprocal_rsub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_hardtanh_mul_pow_reciprocal_rsub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
    tmp3 = tl.load(in_ptr0 + (x0), xmask)
    tmp12 = tl.load(in_ptr1 + (x0), xmask)
    tmp1 = 1.0
    tmp2 = tmp1 - tmp0
    tmp4 = tl_math.exp(tmp3)
    tmp5 = 1e-08
    tmp6 = tmp4 + tmp5
    tmp7 = tl.full([1], 1, tl.int32)
    tmp8 = tmp7 / tmp6
    tmp9 = tmp8 * tmp1
    tmp10 = libdevice.pow(tmp2, tmp9)
    tmp11 = tmp1 - tmp10
    tmp13 = tl_math.exp(tmp12)
    tmp14 = tmp13 + tmp5
    tmp15 = tmp7 / tmp14
    tmp16 = tmp15 * tmp1
    tmp17 = libdevice.pow(tmp11, tmp16)
    tmp18 = 1.2000000000000002
    tmp19 = tmp17 * tmp18
    tmp20 = -0.1
    tmp21 = tmp19 + tmp20
    tmp22 = 0.0
    tmp23 = triton_helpers.maximum(tmp21, tmp22)
    tmp24 = triton_helpers.minimum(tmp23, tmp1)
    tl.store(in_out_ptr0 + (x0), tmp24, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Topologically Sorted Source Nodes: [u], Original ATen: [aten.rand_like]
        buf0 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
        buf1 = buf0
        del buf0
        buf2 = buf1; del buf1  # reuse
        # Topologically Sorted Source Nodes: [sub, exp_1, add, truediv, pow_1, sub_1, exp, add_1, truediv_1, k, mul, t, t_1], Original ATen: [aten.rsub, aten.exp, aten.add, aten.reciprocal, aten.mul, aten.pow, aten.hardtanh]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_exp_hardtanh_mul_pow_reciprocal_rsub_0.run(buf2, arg1_1, arg0_1, 256, grid=grid(256), stream=stream0)
        del arg0_1
        del arg1_1
    return (buf2, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.optim def kuma_reparametrization(a, b): u = torch.rand_like(a) k = (1 - (1 - u) ** (1 / (b + 1e-08))) ** (1 / (a + 1e-08)) return k class Rectifier(nn.Module): def __init__(self, l=-0.1, r=1.1): super().__init__() self.l = l self.r = r self.eps = 1e-07 def forward(self, x, l=None, r=None): l = l if l is not None else self.l r = r if r is not None else self.r t = l + (r - l) * x t = torch.nn.functional.hardtanh(t, 0, 1) return t class HardKumaBinarizer(nn.Module): def __init__(self, l=-0.1, r=1.1): super().__init__() self.rectifier = Rectifier(l, r) def forward(self, a, b, l=None, r=None): k = kuma_reparametrization(torch.exp(a), torch.exp(b)) t = self.rectifier(k, l, r) return t def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
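The reference module above samples a Kumaraswamy variable by inverse-CDF reparametrization, stretches it from (0, 1) to (l, r) = (-0.1, 1.1), and rectifies with hardtanh, so exact 0s and 1s occur with nonzero probability; the constants 1.2000000000000002 and -0.1 baked into the fused kernel are exactly r - l and l. A minimal eager sketch of the same computation (assuming the default l, r; a and b here are log-parameters, matching the torch.exp calls in forward):

import torch

a = torch.zeros(1000)  # log-parameters; exp(0) = 1 gives Kuma(1, 1), i.e. uniform k
b = torch.zeros(1000)
u = torch.rand_like(a)
k = (1 - (1 - u) ** (1 / (torch.exp(b) + 1e-08))) ** (1 / (torch.exp(a) + 1e-08))
t = torch.nn.functional.hardtanh(-0.1 + 1.2 * k, 0, 1)  # stretch to (-0.1, 1.1), clamp to [0, 1]
print((t == 0).float().mean().item(), (t == 1).float().mean().item())  # both ~0.083, i.e. > 0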
import torch from torch import device import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_add_exp_hardtanh_mul_pow_reciprocal_rsub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr0 + x0, xmask) tmp12 = tl.load(in_ptr1 + x0, xmask) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tl_math.exp(tmp3) tmp5 = 1e-08 tmp6 = tmp4 + tmp5 tmp7 = tl.full([1], 1, tl.int32) tmp8 = tmp7 / tmp6 tmp9 = tmp8 * tmp1 tmp10 = libdevice.pow(tmp2, tmp9) tmp11 = tmp1 - tmp10 tmp13 = tl_math.exp(tmp12) tmp14 = tmp13 + tmp5 tmp15 = tmp7 / tmp14 tmp16 = tmp15 * tmp1 tmp17 = libdevice.pow(tmp11, tmp16) tmp18 = 1.2000000000000002 tmp19 = tmp17 * tmp18 tmp20 = -0.1 tmp21 = tmp19 + tmp20 tmp22 = 0.0 tmp23 = triton_helpers.maximum(tmp21, tmp22) tmp24 = triton_helpers.minimum(tmp23, tmp1) tl.store(in_out_ptr0 + x0, tmp24, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten.rand.default([4, 4, 4, 4], dtype=torch. float32, device=device(type='cuda', index=0), pin_memory=False) buf1 = buf0 del buf0 buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_add_exp_hardtanh_mul_pow_reciprocal_rsub_0[grid(256)]( buf2, arg1_1, arg0_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf2, def kuma_reparametrization(a, b): u = torch.rand_like(a) k = (1 - (1 - u) ** (1 / (b + 1e-08))) ** (1 / (a + 1e-08)) return k class Rectifier(nn.Module): def __init__(self, l=-0.1, r=1.1): super().__init__() self.l = l self.r = r self.eps = 1e-07 def forward(self, x, l=None, r=None): l = l if l is not None else self.l r = r if r is not None else self.r t = l + (r - l) * x t = torch.nn.functional.hardtanh(t, 0, 1) return t class HardKumaBinarizerNew(nn.Module): def __init__(self, l=-0.1, r=1.1): super().__init__() self.rectifier = Rectifier(l, r) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ovechkinVT/SkipRNN
HardKumaBinarizer
false
7422
[ "MIT" ]
1
7c1f37349d464b1b6bf8835520abad22b199f1ad
https://github.com/ovechkinVT/SkipRNN/tree/7c1f37349d464b1b6bf8835520abad22b199f1ad
L1Loss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/5u/c5ugctirrdx7vwsb6vcdnsk3ju2zzivmjmo2w2s56lxqbfmgbhnh.py # Topologically Sorted Source Nodes: [loss], Original ATen: [aten.sub, aten.abs, aten.mean] # Source node to ATen node mapping: # loss => abs_1, mean, sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %view_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {}) triton_per_fused_abs_mean_sub_0 = async_compile.triton('triton_per_fused_abs_mean_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_mean_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_abs_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr 
= 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp8, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [loss], Original ATen: [aten.sub, aten.abs, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_abs_mean_sub_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn from torch import torch class L1Loss(nn.Module): def __init__(self): super().__init__() def forward(self, Yp, Yt): num = Yt.size(0) Yp = Yp.view(num, -1) Yt = Yt.view(num, -1) loss = nn.functional.l1_loss(Yp, Yt) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
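The view(num, -1) reshape in the reference forward does not affect a mean-reduced L1 loss, which is why the fused kernel can simply accumulate |a - b| over all 256 elements and divide once. A small CPU check of that equivalence (a sketch, not part of the record):

import torch

yp, yt = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
flat = torch.nn.functional.l1_loss(yp.view(4, -1), yt.view(4, -1))
full = torch.nn.functional.l1_loss(yp, yt)
assert torch.allclose(flat, full)  # reshape is a no-op under mean reduction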
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn from torch import torch assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_mean_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = 256.0 tmp8 = tmp6 / tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_mean_sub_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class L1LossNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
oskarnatan/RGBDVS-fusion
L1Loss
false
7,423
[ "MIT" ]
1
5e560f54442d387a86e3a469107cf65859693987
https://github.com/oskarnatan/RGBDVS-fusion/tree/5e560f54442d387a86e3a469107cf65859693987
HuberLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/sd/csdwhvb6tswj4q3idoyf4s3sk5tk27eharhfemurqkxurk5q4tra.py # Topologically Sorted Source Nodes: [loss], Original ATen: [aten.smooth_l1_loss] # Source node to ATen node mapping: # loss => abs_1, div, lt, mean, mul, pow_1, sub, sub_1, where # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %view_1), kwargs = {}) # %abs_1 : [num_users=3] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_1, 0.5), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 0.5), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_1, 0.25), kwargs = {}) # %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %div, %sub_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%where,), kwargs = {}) triton_per_fused_smooth_l1_loss_0 = async_compile.triton('triton_per_fused_smooth_l1_loss_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_per_fused_smooth_l1_loss_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_smooth_l1_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp1 = tl.load(in_ptr1 + (r0), None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 0.5 tmp5 = tmp3 < tmp4 tmp6 = tmp3 * tmp3 tmp7 = tmp6 * tmp4 tmp8 = 2.0 tmp9 = tmp7 * tmp8 tmp10 = 0.25 tmp11 = tmp3 - tmp10 tmp12 = tl.where(tmp5, tmp9, tmp11) tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp17, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [loss], Original ATen: [aten.smooth_l1_loss] stream0 = get_raw_stream(0) triton_per_fused_smooth_l1_loss_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn from torch import torch class HuberLoss(nn.Module): def __init__(self): super().__init__() def forward(self, Yp, Yt): num = Yt.size(0) Yp = Yp.view(num, -1) Yt = Yt.view(num, -1) loss = nn.functional.smooth_l1_loss(Yp, Yt, beta=0.5) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
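With beta=0.5 the smooth-L1 branches simplify: the quadratic branch 0.5*d^2/beta becomes d^2 (the kernel's tmp6 * 0.5 * 2.0 sequence) and the linear branch becomes |d| - 0.25. An eager sketch of that piecewise form checked against PyTorch's own implementation:

import torch

d = torch.linspace(-2, 2, 9)
ref = torch.nn.functional.smooth_l1_loss(d, torch.zeros_like(d), beta=0.5, reduction='none')
manual = torch.where(d.abs() < 0.5, d * d, d.abs() - 0.25)  # branches with beta = 0.5 inlined
assert torch.allclose(ref, manual)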
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn from torch import torch assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_smooth_l1_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 0.5 tmp5 = tmp3 < tmp4 tmp6 = tmp3 * tmp3 tmp7 = tmp6 * tmp4 tmp8 = 2.0 tmp9 = tmp7 * tmp8 tmp10 = 0.25 tmp11 = tmp3 - tmp10 tmp12 = tl.where(tmp5, tmp9, tmp11) tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_smooth_l1_loss_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class HuberLossNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
oskarnatan/RGBDVS-fusion
HuberLoss
false
7424
[ "MIT" ]
1
5e560f54442d387a86e3a469107cf65859693987
https://github.com/oskarnatan/RGBDVS-fusion/tree/5e560f54442d387a86e3a469107cf65859693987
BCEDiceLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/rf/crfj2rfiuw6stksdk3ng3qvcetigsculcl6ryyksi47nlhtormfb.py # Topologically Sorted Source Nodes: [bce, mul, intersection, mul_1, add, sum_2, sum_3, add_1, add_2, truediv, dice_loss, bce_dice_loss], Original ATen: [aten.binary_cross_entropy, aten.mul, aten.sum, aten.add, aten.div, aten.rsub] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # bce => full_default, full_default_1, log, log1p, maximum, maximum_1, mean, mul, mul_1, neg, sub, sub_1 # bce_dice_loss => add_3 # dice_loss => sub_2 # intersection => sum_1 # mul => mul_2 # mul_1 => mul_3 # sum_2 => sum_2 # sum_3 => sum_3 # truediv => div # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, 1), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%view,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg,), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p, %full_default), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %maximum), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%view,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log, %full_default_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %maximum_1), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_1,), kwargs = {}) # %mul_2 : [num_users=1] = 
call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_2,), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2.0), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, 1e-07), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view,), kwargs = {}) # %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view_1,), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1e-07), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %sub_2), kwargs = {}) triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = -tmp3 tmp5 = libdevice.log1p(tmp4) tmp6 = -100.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp2 * tmp7 tmp9 = tl_math.log(tmp3) tmp10 = triton_helpers.maximum(tmp9, tmp6) tmp11 = tmp0 * tmp10 tmp12 = tmp8 
- tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = tmp3 * tmp0 tmp17 = tl.broadcast_to(tmp16, [RBLOCK]) tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0)) tmp20 = tl.broadcast_to(tmp3, [RBLOCK]) tmp22 = triton_helpers.promote_to_tensor(tl.sum(tmp20, 0)) tmp23 = tl.broadcast_to(tmp0, [RBLOCK]) tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0)) tmp26 = 256.0 tmp27 = tmp15 / tmp26 tmp28 = 2.0 tmp29 = tmp19 * tmp28 tmp30 = 1e-07 tmp31 = tmp29 + tmp30 tmp32 = tmp22 + tmp25 tmp33 = tmp32 + tmp30 tmp34 = tmp31 / tmp33 tmp35 = tmp1 - tmp34 tmp36 = tmp27 + tmp35 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp36, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf4 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [bce, mul, intersection, mul_1, add, sum_2, sum_3, add_1, add_2, truediv, dice_loss, bce_dice_loss], Original ATen: [aten.binary_cross_entropy, aten.mul, aten.sum, aten.add, aten.div, aten.rsub] stream0 = get_raw_stream(0) triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_0.run(buf4, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn from torch import torch class BCEDiceLoss(nn.Module): def __init__(self): super().__init__() def forward(self, Yp, Yt, smooth=1e-07): num = Yt.size(0) Yp = Yp.view(num, -1) Yt = Yt.view(num, -1) bce = nn.functional.binary_cross_entropy(Yp, Yt) intersection = (Yp * Yt).sum() dice_loss = 1 - (2.0 * intersection + smooth) / (Yp.sum() + Yt.sum( ) + smooth) bce_dice_loss = bce + dice_loss return bce_dice_loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
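The single persistent-reduction kernel above fuses four reductions into one pass over the input (its metadata reports num_reduction: 4): the BCE sum, the intersection sum, and the two per-tensor sums for the Dice denominator; the -100 clamp on both log terms mirrors binary_cross_entropy's internal log clamping. An eager sketch of the combined objective (smooth = 1e-07 as in the reference; predictions kept strictly inside (0, 1) so the BCE term is finite):

import torch

yp = torch.rand(4, 4, 4, 4).clamp(1e-4, 1 - 1e-4)
yt = torch.rand(4, 4, 4, 4)
bce = torch.nn.functional.binary_cross_entropy(yp, yt)
inter = (yp * yt).sum()
dice = 1 - (2.0 * inter + 1e-07) / (yp.sum() + yt.sum() + 1e-07)
print((bce + dice).item())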
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn from torch import torch assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = -tmp3 tmp5 = libdevice.log1p(tmp4) tmp6 = -100.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp2 * tmp7 tmp9 = tl_math.log(tmp3) tmp10 = triton_helpers.maximum(tmp9, tmp6) tmp11 = tmp0 * tmp10 tmp12 = tmp8 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = tmp3 * tmp0 tmp17 = tl.broadcast_to(tmp16, [RBLOCK]) tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0)) tmp20 = tl.broadcast_to(tmp3, [RBLOCK]) tmp22 = triton_helpers.promote_to_tensor(tl.sum(tmp20, 0)) tmp23 = tl.broadcast_to(tmp0, [RBLOCK]) tmp25 = triton_helpers.promote_to_tensor(tl.sum(tmp23, 0)) tmp26 = 256.0 tmp27 = tmp15 / tmp26 tmp28 = 2.0 tmp29 = tmp19 * tmp28 tmp30 = 1e-07 tmp31 = tmp29 + tmp30 tmp32 = tmp22 + tmp25 tmp33 = tmp32 + tmp30 tmp34 = tmp31 / tmp33 tmp35 = tmp1 - tmp34 tmp36 = tmp27 + tmp35 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp36, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf4 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_0[grid(1)]( buf4, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf4, class BCEDiceLossNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
oskarnatan/RGBDVS-fusion
BCEDiceLoss
false
7425
[ "MIT" ]
1
5e560f54442d387a86e3a469107cf65859693987
https://github.com/oskarnatan/RGBDVS-fusion/tree/5e560f54442d387a86e3a469107cf65859693987
UnpoolAvgEquiangular
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/iq/ciqv7bi5u5n5lklll775de7zs2dribgtfk5mjtugtmgprxav7lxa.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._unsafe_index, aten.clone] # Source node to ATen node mapping: # x_1 => _unsafe_index, clone # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%permute, [None, None, %unsqueeze, %convert_element_type_3]), kwargs = {}) # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%_unsafe_index,), kwargs = {memory_format: torch.channels_last}) triton_poi_fused__unsafe_index_clone_0 = async_compile.triton('triton_poi_fused__unsafe_index_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = 
xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = (xindex // 32) % 2 x1 = (xindex // 4) % 8 x0 = xindex % 4 x3 = (xindex // 64) x5 = xindex tmp0 = x2 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = x1 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (x0 + (4*tmp8) + (16*x3)), xmask) tl.store(out_ptr0 + (x5), tmp9, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 8), (64, 1, 32, 4), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._unsafe_index, aten.clone] stream0 = get_raw_stream(0) triton_poi_fused__unsafe_index_clone_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 16, 4), (64, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch.nn import functional as F def equiangular_dimension_unpack(nodes, ratio): """Calculate the two underlying dimensions from the total number of nodes Args: nodes (int): combined dimensions ratio (float): ratio between the two dimensions Returns: int, int: separated dimensions """ dim1 = int((nodes / ratio) ** 0.5) dim2 = int((nodes * ratio) ** 0.5) if dim1 * dim2 != nodes: if nodes % dim1 == 0: dim2 = nodes // dim1 if nodes % dim2 == 0: dim1 = nodes // dim2 assert dim1 * dim2 == nodes, f'Unable to unpack nodes: {nodes}, ratio: {ratio}' return dim1, dim2 def equiangular_calculator(tensor, ratio): N, M, F = tensor.size() dim1, dim2 = equiangular_dimension_unpack(M, ratio) tensor = tensor.view(N, dim1, dim2, F) return tensor def reformat(x): """Reformat the input from a 4D tensor to a 3D tensor Args: x (:obj:`torch.tensor`): a 4D tensor Returns: :obj:`torch.tensor`: a 3D tensor """ x = x.permute(0, 2, 3, 1) N, D1, D2, Feat = x.size() x = x.view(N, D1 * D2, Feat) return x class UnpoolAvgEquiangular(torch.nn.Module): """EquiAngular average unpooling Parameters ---------- ratio : float Parameter for equiangular sampling -> width/height """ def __init__(self, ratio, kernel_size, *args, **kwargs): self.ratio = ratio self.kernel_size = int(kernel_size ** 0.5) super().__init__() def forward(self, inputs, *args): """calls pytorch's interpolate function to create the values while unpooling based on the nearby values Parameters ---------- inputs : torch.tensor of shape batch x pixels x features Input data Returns ------- x : torch.tensor of shape batch x unpooled pixels x features Layer output """ x = equiangular_calculator(inputs, self.ratio) x = x.permute(0, 3, 1, 2) x = F.interpolate(x, scale_factor=(self.kernel_size, self. kernel_size), mode='nearest') x = reformat(x) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'ratio': 4, 'kernel_size': 4}]
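For the config used here (M = 4 nodes, ratio = 4, kernel_size = 4), the dimensions unpack to (dim1, dim2) = (1, 4) and the interpolation scale is int(sqrt(4)) = 2 per axis, so 4 pixels become 2 x 8 = 16; that is the (4, 4, 2, 8) buffer the kernel writes before it is reinterpreted as (4, 16, 4). A pure-PyTorch shape sketch of the same pipeline:

import torch
from torch.nn import functional as F

x = torch.rand(4, 4, 4)                     # (batch, pixels, features)
x = x.view(4, 1, 4, 4).permute(0, 3, 1, 2)  # (N, F, dim1, dim2) = (4, 4, 1, 4)
x = F.interpolate(x, scale_factor=(2, 2), mode='nearest')
x = x.permute(0, 2, 3, 1).reshape(4, -1, 4)
assert x.shape == (4, 16, 4)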
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__unsafe_index_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 32 % 2 x1 = xindex // 4 % 8 x0 = xindex % 4 x3 = xindex // 64 x5 = xindex tmp0 = x2 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp3.to(tl.int32) tmp5 = x1 tmp6 = tmp5.to(tl.float32) tmp7 = tmp6 * tmp2 tmp8 = tmp7.to(tl.int32) tmp9 = tl.load(in_ptr0 + (x0 + 4 * tmp8 + 16 * x3), xmask) tl.store(out_ptr0 + x5, tmp9, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 2, 8), (64, 1, 32, 4), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_index_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 16, 4), (64, 4, 1), 0), def equiangular_dimension_unpack(nodes, ratio): """Calculate the two underlying dimensions from the total number of nodes Args: nodes (int): combined dimensions ratio (float): ratio between the two dimensions Returns: int, int: separated dimensions """ dim1 = int((nodes / ratio) ** 0.5) dim2 = int((nodes * ratio) ** 0.5) if dim1 * dim2 != nodes: if nodes % dim1 == 0: dim2 = nodes // dim1 if nodes % dim2 == 0: dim1 = nodes // dim2 assert dim1 * dim2 == nodes, f'Unable to unpack nodes: {nodes}, ratio: {ratio}' return dim1, dim2 def equiangular_calculator(tensor, ratio): N, M, F = tensor.size() dim1, dim2 = equiangular_dimension_unpack(M, ratio) tensor = tensor.view(N, dim1, dim2, F) return tensor def reformat(x): """Reformat the input from a 4D tensor to a 3D tensor Args: x (:obj:`torch.tensor`): a 4D tensor Returns: :obj:`torch.tensor`: a 3D tensor """ x = x.permute(0, 2, 3, 1) N, D1, D2, Feat = x.size() x = x.view(N, D1 * D2, Feat) return x class UnpoolAvgEquiangularNew(torch.nn.Module): """EquiAngular average unpooling Parameters ---------- ratio : float Parameter for equiangular sampling -> width/height """ def __init__(self, ratio, kernel_size, *args, **kwargs): self.ratio = ratio self.kernel_size = int(kernel_size ** 0.5) super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ownzonefeng/weather_prediction
UnpoolAvgEquiangular
false
7426
[ "MIT" ]
1
723c02b6b3c0a40751d87572b66c7a4e040dec92
https://github.com/ownzonefeng/weather_prediction/tree/723c02b6b3c0a40751d87572b66c7a4e040dec92
UnpoolAvgHealpix
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/25/c25uvewxtb2cqewuyqopwqrakp2ojb5q4mux7wbn3ftnkz4vvhzr.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._unsafe_index] # Source node to ATen node mapping: # x_1 => _unsafe_index # Graph fragment: # %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%permute, [None, None, %convert_element_type_1]), kwargs = {}) triton_poi_fused__unsafe_index_0 = async_compile.triton('triton_poi_fused__unsafe_index_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) % 4 x2 = (xindex // 64) x3 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.25 tmp3 = tmp1 * tmp2 tmp4 = 
tmp3.to(tl.int32) tmp5 = tl.load(in_ptr0 + (x1 + (4*tmp4) + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._unsafe_index] stream0 = get_raw_stream(0) triton_poi_fused__unsafe_index_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 16, 4), (64, 1, 16), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch class UnpoolAvgHealpix(torch.nn.Module): """Healpix Average Unpooling module Parameters ---------- kernel_size : int Pooling kernel width """ def __init__(self, kernel_size, *args, **kwargs): """kernel_size should be 4, 16, 64, etc.""" super().__init__() self.kernel_size = kernel_size def extra_repr(self): return 'kernel_size={kernel_size}'.format(**self.__dict__) def forward(self, x, *args): """x has shape (batch, pixels, channels) and is in nested ordering""" x = x.permute(0, 2, 1) x = torch.nn.functional.interpolate(x, scale_factor=self. kernel_size, mode='nearest') return x.permute(0, 2, 1) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'kernel_size': 4}]
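Nearest-neighbour upsampling along the pixel axis simply replicates each coarse pixel kernel_size times, which is what makes this valid for nested HEALPix ordering, where the children of a pixel are contiguous. repeat_interleave gives the same result in eager mode, as this sketch checks:

import torch

x = torch.rand(4, 4, 4)  # (batch, pixels, channels), nested ordering
up = torch.nn.functional.interpolate(
    x.permute(0, 2, 1), scale_factor=4, mode='nearest').permute(0, 2, 1)
assert torch.equal(up, x.repeat_interleave(4, dim=1))  # each pixel copied 4x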
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 % 4 x2 = xindex // 64 x3 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.25 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tmp5 = tl.load(in_ptr0 + (x1 + 4 * tmp4 + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp5, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) get_raw_stream(0) triton_poi_fused__unsafe_index_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 16, 4), (64, 1, 16), 0), class UnpoolAvgHealpixNew(torch.nn.Module): """Healpix Average Unpooling module Parameters ---------- kernel_size : int Pooling kernel width """ def __init__(self, kernel_size, *args, **kwargs): """kernel_size should be 4, 16, 64, etc.""" super().__init__() self.kernel_size = kernel_size def extra_repr(self): return 'kernel_size={kernel_size}'.format(**self.__dict__) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ownzonefeng/weather_prediction
UnpoolAvgHealpix
false
7427
[ "MIT" ]
1
723c02b6b3c0a40751d87572b66c7a4e040dec92
https://github.com/ownzonefeng/weather_prediction/tree/723c02b6b3c0a40751d87572b66c7a4e040dec92
IOUScore
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/wa/cwazyy64v7y3sgk7yrmxl3a6dpgpv4z2nj2vj2pppkyrieeps7ac.py # Topologically Sorted Source Nodes: [output_, target_, and_, intersection, or_, union, iou], Original ATen: [aten.gt, aten.bitwise_and, aten.sum, aten.bitwise_or, aten.div] # Source node to ATen node mapping: # and_ => bitwise_and # intersection => sum_1 # iou => div # or_ => bitwise_or # output_ => gt # target_ => gt_1 # union => sum_2 # Graph fragment: # %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 0.5), kwargs = {}) # %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg1_1, 0.5), kwargs = {}) # %bitwise_and : [num_users=1] = call_function[target=torch.ops.aten.bitwise_and.Tensor](args = (%gt, %gt_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%bitwise_and,), kwargs = {}) # %bitwise_or : [num_users=1] = call_function[target=torch.ops.aten.bitwise_or.Tensor](args = (%gt, %gt_1), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%bitwise_or,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {}) triton_per_fused_bitwise_and_bitwise_or_div_gt_sum_0 = async_compile.triton('triton_per_fused_bitwise_and_bitwise_or_div_gt_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_per_fused_bitwise_and_bitwise_or_div_gt_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_bitwise_and_bitwise_or_div_gt_sum_0(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 0.5 tmp2 = tmp0 > tmp1 tmp4 = tmp3 > tmp1 tmp5 = tmp2 & tmp4 tmp6 = tmp5.to(tl.int64) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = tmp2 | tmp4 tmp11 = tmp10.to(tl.int64) tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = tmp9.to(tl.float32) tmp16 = tmp14.to(tl.float32) tmp17 = tmp15 / tmp16 tl.store(out_ptr2 + (tl.full([1], 0, tl.int32)), tmp17, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((), (), torch.float32) # Topologically Sorted Source Nodes: [output_, target_, and_, intersection, or_, union, iou], Original ATen: [aten.gt, aten.bitwise_and, aten.sum, aten.bitwise_or, aten.div] stream0 = get_raw_stream(0) triton_per_fused_bitwise_and_bitwise_or_div_gt_sum_0.run(arg0_1, arg1_1, buf2, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch import nn class IOUScore(nn.Module): def __init__(self): super().__init__() def forward(self, Yp, Yt): output_ = Yp > 0.5 target_ = Yt > 0.5 intersection = (output_ & target_).sum() union = (output_ | target_).sum() iou = intersection / union return iou def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
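A quick numerical sanity check of the forward pass above (a minimal sketch; the example tensors are illustrative and not part of the source):

import torch

iou = IOUScore()
yp = torch.tensor([0.9, 0.8, 0.1, 0.2])  # predicted probabilities
yt = torch.tensor([1.0, 0.0, 0.0, 1.0])  # binary targets
# thresholding at 0.5 gives masks [1,1,0,0] and [1,0,0,1]:
# intersection = 1, union = 3, so IoU = 1/3
print(iou(yp, yt))  # tensor(0.3333)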
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_bitwise_and_bitwise_or_div_gt_sum_0(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 0.5 tmp2 = tmp0 > tmp1 tmp4 = tmp3 > tmp1 tmp5 = tmp2 & tmp4 tmp6 = tmp5.to(tl.int64) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp10 = tmp2 | tmp4 tmp11 = tmp10.to(tl.int64) tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = tmp9.to(tl.float32) tmp16 = tmp14.to(tl.float32) tmp17 = tmp15 / tmp16 tl.store(out_ptr2 + tl.full([1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused_bitwise_and_bitwise_or_div_gt_sum_0[grid(1)](arg0_1, arg1_1, buf2, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf2, class IOUScoreNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
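For the (4, 4, 4, 4) shape the kernel was specialized for, the compiled wrapper should match the eager module exactly. A minimal equivalence check, assuming a CUDA device is available (the generated call() pins device 0, so IOUScoreNew is CUDA-only):

import torch

if torch.cuda.is_available():
    yp = torch.rand(4, 4, 4, 4, device='cuda')
    yt = torch.rand(4, 4, 4, 4, device='cuda')
    # both paths threshold at 0.5 and divide the two reduction results
    assert torch.allclose(IOUScore()(yp, yt), IOUScoreNew()(yp, yt))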
oskarnatan/RGBDVS-fusion
IOUScore
false
7,428
[ "MIT" ]
1
5e560f54442d387a86e3a469107cf65859693987
https://github.com/oskarnatan/RGBDVS-fusion/tree/5e560f54442d387a86e3a469107cf65859693987
Critic
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/sm/csm4ofalq42npqq7fv6jo3il6ujywmjwqnazwa5z35h4asxel7vx.py # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] # Source node to ATen node mapping: # cat => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %primals_4], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 68 x1 = (xindex // 68) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + ((64*x1) + x0), tmp4 & 
xmask, eviction_policy='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tmp13 = tl.full([1], 68, tl.int64) tmp14 = tmp0 < tmp13 tmp15 = tl.load(in_ptr2 + ((4*x1) + ((-64) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/b7/cb7yiqdigd2vu5it7f2y6axob3bgvkx2ecs3nmymezsrlxsu2jhl.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] # Source node to ATen node mapping: # x_3 => relu_1 # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_6), kwargs = {}) # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/it/cit4qjb7wmwrbvv2rtchpn3duppvfiyliqnz2jz3tymwbqqane7m.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_2), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = 
(%add_tensor_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (32, 68), (68, 1)) assert_size_stride(primals_6, (32, ), (1, )) assert_size_stride(primals_7, (1, 32), (32, 1)) assert_size_stride(primals_8, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 68), (68, 1), torch.float32) # Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(buf0, primals_2, primals_4, buf1, 272, grid=grid(272), stream=stream0) del primals_4 buf2 = empty_strided_cuda((4, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf1, reinterpret_tensor(primals_5, (68, 32), (1, 68), 0), out=buf2) buf3 = buf2; del buf2 # reuse # 
Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf3, primals_6, 128, grid=grid(128), stream=stream0) del primals_6 buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm] extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7, (32, 1), (1, 32), 0), alpha=1, beta=1, out=buf5) del primals_8 buf6 = empty_strided_cuda((4, 64), (64, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_2.run(buf0, primals_2, buf6, 256, grid=grid(256), stream=stream0) del buf0 del primals_2 return (buf5, primals_3, buf1, buf3, primals_7, primals_5, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, 68), (68, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((1, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np from torch import nn import torch.autograd def fanin_(size): fan_in = size[0] weight = 1.0 / np.sqrt(fan_in) return torch.Tensor(size).uniform_(-weight, weight) class Critic(nn.Module): def __init__(self, state_dim, action_dim, h1=64, h2=32, init_w=0.003): super(Critic, self).__init__() self.linear1 = nn.Linear(state_dim, h1) self.linear1.weight.data = fanin_(self.linear1.weight.data.size()) self.linear2 = nn.Linear(h1 + action_dim, h2) self.linear2.weight.data = fanin_(self.linear2.weight.data.size()) self.linear3 = nn.Linear(h2, 1) self.linear3.weight.data.uniform_(-init_w, init_w) self.relu = nn.ReLU() def forward(self, state, action): x = self.linear1(state) x = self.relu(x) x = self.linear2(torch.cat([x, action], 1)) x = self.relu(x) x = self.linear3(x) return x def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'state_dim': 4, 'action_dim': 4}]
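For reference, a minimal sketch of how this critic is exercised; the batch size is an illustrative assumption, not taken from the source:

import torch

critic = Critic(state_dim=4, action_dim=4)
state = torch.rand(8, 4)   # batch of 8 states
action = torch.rand(8, 4)  # batch of 8 actions
q = critic(state, action)  # one Q-value per (state, action) pair
print(q.shape)             # torch.Size([8, 1])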
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np from torch import nn import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 68 x1 = xindex // 68 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 64, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (64 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 68, tl.int64) tmp15 = tl.load(in_ptr2 + (4 * x1 + (-64 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (32, 68), (68, 1)) assert_size_stride(primals_6, (32,), (1,)) assert_size_stride(primals_7, (1, 32), (32, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 64), (64, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 68), (68, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(272)](buf0, primals_2, primals_4, buf1, 272, XBLOCK=128, num_warps=4, num_stages=1) del primals_4 buf2 = empty_strided_cuda((4, 32), (32, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_5, (68, 32), (1, 68), 0), out=buf2) 
buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(128)](buf3, primals_6, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_6 buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7, (32, 1), (1, 32), 0), alpha=1, beta=1, out=buf5) del primals_8 buf6 = empty_strided_cuda((4, 64), (64, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(256)](buf0, primals_2, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 return buf5, primals_3, buf1, buf3, primals_7, primals_5, buf6 def fanin_(size): fan_in = size[0] weight = 1.0 / np.sqrt(fan_in) return torch.Tensor(size).uniform_(-weight, weight) class CriticNew(nn.Module): def __init__(self, state_dim, action_dim, h1=64, h2=32, init_w=0.003): super(CriticNew, self).__init__() self.linear1 = nn.Linear(state_dim, h1) self.linear1.weight.data = fanin_(self.linear1.weight.data.size()) self.linear2 = nn.Linear(h1 + action_dim, h2) self.linear2.weight.data = fanin_(self.linear2.weight.data.size()) self.linear3 = nn.Linear(h2, 1) self.linear3.weight.data.uniform_(-init_w, init_w) self.relu = nn.ReLU() def forward(self, input_0, input_1): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_5 = self.linear2.weight primals_6 = self.linear2.bias primals_7 = self.linear3.weight primals_8 = self.linear3.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
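The first kernel in this wrapper, triton_poi_fused_cat_0, fuses the bias add, the ReLU, and the concatenation with the action into a single pass over the 4x68 buffer that feeds linear2. The same buffer written eagerly, for intuition (the weights here are random placeholders, not the module's parameters):

import torch
import torch.nn.functional as F

state, action = torch.rand(4, 4), torch.rand(4, 4)
w1, b1 = torch.rand(64, 4), torch.rand(64)
# buf1 = [relu(state @ W1^T + b1) | action], shape (4, 68)
buf1 = torch.cat([F.relu(state @ w1.t() + b1), action], dim=1)
print(buf1.shape)  # torch.Size([4, 68])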
ori-goals/lfd-min-human-effort
Critic
false
7,429
[ "MIT" ]
1
f9fd70cdeb661151e5f81ac538ceb865531146b9
https://github.com/ori-goals/lfd-min-human-effort/tree/f9fd70cdeb661151e5f81ac538ceb865531146b9
HintonBinarizer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/sk/cskfzr7unt3o6lbqp52hiw4lbz3jnzq4jdg3fczqxgirkqifl766.py # Topologically Sorted Source Nodes: [gt, float_1], Original ATen: [aten.gt, aten._to_copy] # Source node to ATen node mapping: # float_1 => convert_element_type # gt => gt # Graph fragment: # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 0.5), kwargs = {}) # %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%gt, torch.float32), kwargs = {}) triton_poi_fused__to_copy_gt_0 = async_compile.triton('triton_poi_fused__to_copy_gt_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_gt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__to_copy_gt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 
+ (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 > tmp1 tmp3 = tmp2.to(tl.float32) tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [gt, float_1], Original ATen: [aten.gt, aten._to_copy] stream0 = get_raw_stream(0) triton_poi_fused__to_copy_gt_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.optim class hinton_binarize(torch.autograd.Function): """ Binarize function from the paper 'SKIP RNN: LEARNING TO SKIP STATE UPDATES IN RECURRENT NEURAL NETWORKS' https://openreview.net/forum?id=HkwVAXyCW Works as round function but has a unit gradient: Binarize(x) := (x > 0.5).float() d Binarize(x) / dx := 1 """ @staticmethod def forward(ctx, x, threshold=0.5): return (x > threshold).float() @staticmethod def backward(ctx, grad_output): return grad_output, None class HintonBinarizer(nn.Module): """ Binarize function from the paper 'SKIP RNN: LEARNING TO SKIP STATE UPDATES IN RECURRENT NEURAL NETWORKS' https://openreview.net/forum?id=HkwVAXyCW Works as round function but has a unit gradient: Binarize(x) := (x > 0.5).float() d Binarize(x) / dx := 1 """ def __init__(self, threshold=0.5): super().__init__() self.threshold = threshold def forward(self, x, threshold=None): threshold = threshold if threshold is not None else self.threshold return hinton_binarize.apply(x, threshold) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
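The point of the custom autograd.Function is the straight-through estimator: the forward pass is a hard threshold, while the backward pass hands gradients through unchanged. A minimal demonstration:

import torch

binarize = HintonBinarizer()
x = torch.rand(5, requires_grad=True)
y = binarize(x)     # hard 0/1 values
y.sum().backward()  # backward treats the threshold as the identity
print(y)            # zeros and ones
print(x.grad)       # all ones: d Binarize(x) / dx := 1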
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__to_copy_gt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 > tmp1 tmp3 = tmp2.to(tl.float32) tl.store(out_ptr0 + x0, tmp3, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__to_copy_gt_0[grid(256)](arg0_1, buf0, 256, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 return buf0, class hinton_binarize(torch.autograd.Function): """ Binarize function from the paper 'SKIP RNN: LEARNING TO SKIP STATE UPDATES IN RECURRENT NEURAL NETWORKS' https://openreview.net/forum?id=HkwVAXyCW Works as round function but has a unit gradient: Binarize(x) := (x > 0.5).float() d Binarize(x) / dx := 1 """ @staticmethod def forward(ctx, x, threshold=0.5): return (x > threshold).float() @staticmethod def backward(ctx, grad_output): return grad_output, None class HintonBinarizerNew(nn.Module): """ Binarize function from the paper 'SKIP RNN: LEARNING TO SKIP STATE UPDATES IN RECURRENT NEURAL NETWORKS' https://openreview.net/forum?id=HkwVAXyCW Works as round function but has a unit gradient: Binarize(x) := (x > 0.5).float() d Binarize(x) / dx := 1 """ def __init__(self, threshold=0.5): super().__init__() self.threshold = threshold def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
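Note that the compiled wrapper bakes the 0.5 threshold into the kernel and routes around the custom autograd.Function, so it is a forward-only equivalent. A minimal check, assuming a CUDA device is available:

import torch

if torch.cuda.is_available():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.equal(HintonBinarizerNew()(x), (x > 0.5).float())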
ovechkinVT/SkipRNN
HintonBinarizer
false
7,430
[ "MIT" ]
1
7c1f37349d464b1b6bf8835520abad22b199f1ad
https://github.com/ovechkinVT/SkipRNN/tree/7c1f37349d464b1b6bf8835520abad22b199f1ad
torch_return_int8_argmax
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/n4/cn4szll4copwskp64zgti6qhmul4ybjdz6ghrbyoz573ejmmepyb.py # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max] # Source node to ATen node mapping: # max_1 => getitem_1 # Graph fragment: # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%max_1, 1), kwargs = {}) triton_poi_fused_max_0 = async_compile.triton('triton_poi_fused_max_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0), xmask) tmp17 = tl.load(in_ptr0 + (128 + x0), xmask) tmp32 = tl.load(in_ptr0 + (192 + x0), xmask) tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = 
tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tmp45 = tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tl.store(out_ptr0 + (x0), tmp46, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) # Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max] stream0 = get_raw_stream(0) triton_poi_fused_max_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch class torch_return_int8_argmax(torch.nn.Module): def __init__(self): super(torch_return_int8_argmax, self).__init__() def forward(self, x): x0 = x.squeeze(0) _, x1 = torch.max(x0, 0) return x1 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
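Despite the 'int8' in the module name, torch.max returns its indices as int64; the module is simply an index-of-max over dim 0 of the squeezed input (and squeeze(0) is a no-op here, since the leading dimension is 4, not 1). A quick illustration with the shape from get_inputs:

import torch

m = torch_return_int8_argmax()
x = torch.rand(4, 4, 4, 4)
idx = m(x)                       # indices of the max along dim 0
assert idx.dtype == torch.int64  # int64, not int8
assert torch.equal(idx, x.squeeze(0).max(dim=0).indices)
print(idx.shape)                 # torch.Size([4, 4, 4])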
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (64 + x0), xmask) tmp17 = tl.load(in_ptr0 + (128 + x0), xmask) tmp32 = tl.load(in_ptr0 + (192 + x0), xmask) tmp2 = tmp0 > tmp1 tmp3 = tmp0 == tmp1 tmp4 = tmp0 != tmp0 tmp5 = tmp1 != tmp1 tmp6 = tmp4 > tmp5 tmp7 = tmp2 | tmp6 tmp8 = tmp4 & tmp5 tmp9 = tmp3 | tmp8 tmp10 = tl.full([1], 0, tl.int64) tmp11 = tl.full([1], 1, tl.int64) tmp12 = tmp10 < tmp11 tmp13 = tmp9 & tmp12 tmp14 = tmp7 | tmp13 tmp15 = tl.where(tmp14, tmp0, tmp1) tmp16 = tl.where(tmp14, tmp10, tmp11) tmp18 = tmp15 > tmp17 tmp19 = tmp15 == tmp17 tmp20 = tmp15 != tmp15 tmp21 = tmp17 != tmp17 tmp22 = tmp20 > tmp21 tmp23 = tmp18 | tmp22 tmp24 = tmp20 & tmp21 tmp25 = tmp19 | tmp24 tmp26 = tl.full([1], 2, tl.int64) tmp27 = tmp16 < tmp26 tmp28 = tmp25 & tmp27 tmp29 = tmp23 | tmp28 tmp30 = tl.where(tmp29, tmp15, tmp17) tmp31 = tl.where(tmp29, tmp16, tmp26) tmp33 = tmp30 > tmp32 tmp34 = tmp30 == tmp32 tmp35 = tmp30 != tmp30 tmp36 = tmp32 != tmp32 tmp37 = tmp35 > tmp36 tmp38 = tmp33 | tmp37 tmp39 = tmp35 & tmp36 tmp40 = tmp34 | tmp39 tmp41 = tl.full([1], 3, tl.int64) tmp42 = tmp31 < tmp41 tmp43 = tmp40 & tmp42 tmp44 = tmp38 | tmp43 tl.where(tmp44, tmp30, tmp32) tmp46 = tl.where(tmp44, tmp31, tmp41) tl.store(out_ptr0 + x0, tmp46, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) get_raw_stream(0) triton_poi_fused_max_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class torch_return_int8_argmaxNew(torch.nn.Module): def __init__(self): super(torch_return_int8_argmaxNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
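The kernel unrolls the size-4 reduction into three pairwise compare-and-select steps, carrying a running (value, index) pair; the extra x != x comparisons make NaNs win the comparison, and the index < index check keeps the earlier index on ties. The same selection rule in plain Python, as a sketch of the logic only (NaN handling omitted for brevity):

def running_argmax(vals):
    # mirrors the tmp14 / tmp29 / tmp44 selects above
    best_v, best_i = vals[0], 0
    for i, v in enumerate(vals[1:], start=1):
        # keep the current best on ties, since its index is lower
        if not (best_v > v or best_v == v):
            best_v, best_i = v, i
    return best_i

print(running_argmax([0.1, 0.7, 0.7, 0.3]))  # 1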
ozendelait/pytorch-semseg
torch_return_int8_argmax
false
7,431
[ "MIT" ]
1
200491febd653bd26befcd5b3d52c614aa832b7e
https://github.com/ozendelait/pytorch-semseg/tree/200491febd653bd26befcd5b3d52c614aa832b7e
Actor
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/nq/cnqjufcqn3ur3s7xvlb2i747nyf24md4zaiatlwgkasynplfjstu.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_1 => relu # Graph fragment: # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {}) # %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 
= xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/54/c546inlectt6zvbpgn5qhxi6h2mqgwz227jurnrzfeistnsnjut6.py # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # x_3 => relu_1 # Graph fragment: # %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {}) triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 2048 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + (x2), tmp4, None) tl.store(out_ptr0 + (x2), tmp6, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ns/cnszijuiz432ctw37rqktvk3syr2vugzeuatmva3neoizic6f3sq.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x_5 => tanh # Graph fragment: # %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {}) triton_poi_fused_tanh_2 = async_compile.triton('triton_poi_fused_tanh_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from 
torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (32, 64), (64, 1)) assert_size_stride(primals_5, (32, ), (1, )) assert_size_stride(primals_6, (4, 32), (32, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf0 # reuse buf7 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 4096, grid=grid(4096), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 32), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0); del buf2 # reuse buf6 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward] triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf6, 2048, 
grid=grid(2048), stream=stream0) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (64, 32), (32, 1), 0), reinterpret_tensor(primals_6, (32, 4), (1, 32), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.tanh] triton_poi_fused_tanh_2.run(buf5, primals_7, 256, grid=grid(256), stream=stream0) del primals_7 return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(buf3, (64, 32), (32, 1), 0), buf5, primals_6, buf6, primals_4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((32, 64), (64, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 32), (32, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import numpy as np from torch import nn import torch.autograd def fanin_(size): fan_in = size[0] weight = 1.0 / np.sqrt(fan_in) return torch.Tensor(size).uniform_(-weight, weight) class Actor(nn.Module): def __init__(self, state_dim, action_dim, h1=64, h2=32, init_w=0.003): super(Actor, self).__init__() self.linear1 = nn.Linear(state_dim, h1) self.linear1.weight.data = fanin_(self.linear1.weight.data.size()) self.linear2 = nn.Linear(h1, h2) self.linear2.weight.data = fanin_(self.linear2.weight.data.size()) self.linear3 = nn.Linear(h2, action_dim) self.linear3.weight.data.uniform_(-init_w, init_w) self.relu = nn.ReLU() self.tanh = nn.Tanh() cuda = torch.cuda.is_available() self.device = torch.device('cuda' if cuda else 'cpu') def forward(self, state): x = self.linear1(state) x = self.relu(x) x = self.linear2(x) x = self.relu(x) x = self.linear3(x) x = self.tanh(x) return x def get_action(self, state): state = torch.FloatTensor(state).unsqueeze(0) action = self.forward(state) return action.detach().cpu().numpy()[0] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_dim': 4, 'action_dim': 4}]
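A minimal usage sketch (the state vector is an illustrative placeholder):

actor = Actor(state_dim=4, action_dim=4)
state = [0.1, 0.2, 0.3, 0.4]
action = actor.get_action(state)  # numpy array, values in (-1, 1) from tanh
print(action.shape)               # (4,)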
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import numpy as np from torch import nn import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (64, 4), (4, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (32, 64), (64, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (4, 32), (32, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool ) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1, primals_2, buf7, 4096, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 32), (32, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 32), (1, 64), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 32), (512, 128, 32, 1), 0) del buf2 buf6 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool) 
triton_poi_fused_relu_threshold_backward_1[grid(2048)](buf3, primals_5, buf6, 2048, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 32), (32, 1), 0), reinterpret_tensor(primals_6, (32, 4), (1, 32), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused_tanh_2[grid(256)](buf5, primals_7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor( buf3, (64, 32), (32, 1), 0), buf5, primals_6, buf6, primals_4, buf7 def fanin_(size): fan_in = size[0] weight = 1.0 / np.sqrt(fan_in) return torch.Tensor(size).uniform_(-weight, weight) class ActorNew(nn.Module): def __init__(self, state_dim, action_dim, h1=64, h2=32, init_w=0.003): super(ActorNew, self).__init__() self.linear1 = nn.Linear(state_dim, h1) self.linear1.weight.data = fanin_(self.linear1.weight.data.size()) self.linear2 = nn.Linear(h1, h2) self.linear2.weight.data = fanin_(self.linear2.weight.data.size()) self.linear3 = nn.Linear(h2, action_dim) self.linear3.weight.data.uniform_(-init_w, init_w) self.relu = nn.ReLU() self.tanh = nn.Tanh() cuda = torch.cuda.is_available() self.device = torch.device('cuda' if cuda else 'cpu') def get_action(self, state): state = torch.FloatTensor(state).unsqueeze(0) action = self.forward(state) return action.detach().cpu().numpy()[0] def forward(self, input_0): primals_1 = self.linear1.weight primals_2 = self.linear1.bias primals_4 = self.linear2.weight primals_5 = self.linear2.bias primals_6 = self.linear3.weight primals_7 = self.linear3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
ori-goals/lfd-min-human-effort
Actor
false
7,432
[ "MIT" ]
1
f9fd70cdeb661151e5f81ac538ceb865531146b9
https://github.com/ori-goals/lfd-min-human-effort/tree/f9fd70cdeb661151e5f81ac538ceb865531146b9
Rectifier
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/jt/cjttk7o6na747j6wchwxpm7fvq5ogr7qe7htgmdltxiyrnbm4e33.py # Topologically Sorted Source Nodes: [mul, t, t_1], Original ATen: [aten.mul, aten.add, aten.hardtanh] # Source node to ATen node mapping: # mul => mul # t => add # t_1 => clamp_max, clamp_min # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1.2000000000000002), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, -0.1), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {}) # %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1), kwargs = {}) triton_poi_fused_add_hardtanh_mul_0 = async_compile.triton('triton_poi_fused_add_hardtanh_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_hardtanh_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused_add_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.2000000000000002 tmp2 = tmp0 * tmp1 tmp3 = -0.1 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = 1.0 tmp8 = triton_helpers.minimum(tmp6, tmp7) tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, t, t_1], Original ATen: [aten.mul, aten.add, aten.hardtanh] stream0 = get_raw_stream(0) triton_poi_fused_add_hardtanh_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.optim class Rectifier(nn.Module): def __init__(self, l=-0.1, r=1.1): super().__init__() self.l = l self.r = r self.eps = 1e-07 def forward(self, x, l=None, r=None): l = l if l is not None else self.l r = r if r is not None else self.r t = l + (r - l) * x t = torch.nn.functional.hardtanh(t, 0, 1) return t def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.2000000000000002 tmp2 = tmp0 * tmp1 tmp3 = -0.1 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = 1.0 tmp8 = triton_helpers.minimum(tmp6, tmp7) tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_hardtanh_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class RectifierNew(nn.Module): def __init__(self, l=-0.1, r=1.1): super().__init__() self.l = l self.r = r self.eps = 1e-07 def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ovechkinVT/SkipRNN
Rectifier
false
7,433
[ "MIT" ]
1
7c1f37349d464b1b6bf8835520abad22b199f1ad
https://github.com/ovechkinVT/SkipRNN/tree/7c1f37349d464b1b6bf8835520abad22b199f1ad
torch_fakeint8_to_float
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/wg/cwg2jpgvg7egh2kdtwwuk5fjapsjymwpr3d4jghgpngnns2hyz6g.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%unsqueeze_1,), kwargs = {memory_format: torch.contiguous_format}) # %copy_ : [num_users=0] = call_function[target=torch.ops.aten.copy_.default](args = (%arg0_1, %permute_1), kwargs = {}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': ['in_ptr0', 'out_ptr1'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + 
tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (x1 + (4*y0)), xmask & ymask, eviction_policy='evict_last') tmp1 = -1.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 0.0 tmp4 = triton_helpers.minimum(tmp2, tmp3) tmp5 = -256.0 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tl.store(out_ptr0 + (y0 + (16*x1)), tmp7, xmask & ymask) tl.store(out_ptr1 + (x1 + (4*y0)), tmp7, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(arg0_1, buf0, arg0_1, 16, 4, grid=grid(16, 4), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch class torch_fakeint8_to_float(torch.nn.Module): def __init__(self): super(torch_fakeint8_to_float, self).__init__() def forward(self, x): x0 = x.permute(2, 0, 1) x0 += torch.clamp(x0, -1, 0) * -256.0 return x0.unsqueeze(0).contiguous() def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (x1 + 4 * y0), xmask & ymask, eviction_policy= 'evict_last') tmp1 = -1.0 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = 0.0 tmp4 = triton_helpers.minimum(tmp2, tmp3) tmp5 = -256.0 tmp6 = tmp4 * tmp5 tmp7 = tmp0 + tmp6 tl.store(out_ptr0 + (y0 + 16 * x1), tmp7, xmask & ymask) tl.store(out_ptr1 + (x1 + 4 * y0), tmp7, xmask & ymask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](arg0_1, buf0, arg0_1, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return buf0, class torch_fakeint8_to_floatNew(torch.nn.Module): def __init__(self): super(torch_fakeint8_to_floatNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ozendelait/pytorch-semseg
torch_fakeint8_to_float
false
7,434
[ "MIT" ]
1
200491febd653bd26befcd5b3d52c614aa832b7e
https://github.com/ozendelait/pytorch-semseg/tree/200491febd653bd26befcd5b3d52c614aa832b7e
torch_uint8_to_float_normed
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/2b/c2b2qscyt7jh2sc5mpwzpfixavplwowdf2s6xnclxvxr4cctz37r.py # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] # Source node to ATen node mapping: # contiguous => clone # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%unsqueeze,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = 
xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last') tmp1 = 0.00392156862745098 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x1 + (16*y0)), tmp2, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(arg0_1, buf0, 4, 16, grid=grid(4, 16), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch class torch_uint8_to_float_normed(torch.nn.Module): def __init__(self): super(torch_uint8_to_float_normed, self).__init__() def forward(self, x): return (x.permute(2, 0, 1) / 255.0).unsqueeze(0).contiguous() def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tmp1 = 0.00392156862745098 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x1 + 16 * y0), tmp2, xmask & ymask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(4, 16)](arg0_1, buf0, 4, 16, XBLOCK= 16, YBLOCK=4, num_warps=1, num_stages=1) del arg0_1 return buf0, class torch_uint8_to_float_normedNew(torch.nn.Module): def __init__(self): super(torch_uint8_to_float_normedNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ozendelait/pytorch-semseg
torch_uint8_to_float_normed
false
7,435
[ "MIT" ]
1
200491febd653bd26befcd5b3d52c614aa832b7e
https://github.com/ozendelait/pytorch-semseg/tree/200491febd653bd26befcd5b3d52c614aa832b7e
Attention
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/7e/c7eul75w5s2nxzr6mevxwrjuv2vslc5jmikyp4moxhc5kqk225qk.py # Topologically Sorted Source Nodes: [wplus_2], Original ATen: [aten.tanh] # Source node to ATen node mapping: # wplus_2 => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = libdevice.tanh(tmp0) tl.store(in_out_ptr0 + (x0), tmp1, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/ts/ctscnzvbagjv4t25zui245b3recij5udu7nvujnr5rixcyo7elc6.py # 
Topologically Sorted Source Nodes: [att_w_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # att_w_2 => amax, exp, sub # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_4, [1], True), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_4, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/k6/ck6fz3qsfeqgn5jtm4ugikmu7cwvvlq3jpttijbb5kdniicwtyz6.py # Topologically Sorted Source Nodes: [att_w_2], Original ATen: [aten._softmax] # Source node to ATen node mapping: # att_w_2 => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers 
import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [wplus], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_2, out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [wplus_2], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [att_w], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 1), (1, 1), 0), out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [att_w_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf2, buf3, 16, grid=grid(16), stream=stream0) buf4 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [att_w_2], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf3, buf4, 16, grid=grid(16), stream=stream0) buf5 = reinterpret_tensor(buf3, (4, 1, 4), (4, 4, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [after_attention], Original ATen: [aten.bmm] 
extern_kernels.bmm(reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0), primals_1, out=buf5) return (buf5, buf4, primals_1, buf1, buf4, reinterpret_tensor(primals_3, (1, 4), (1, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import math import torch import torch.nn as nn import torch.optim import torch.nn.functional as F class Attention(nn.Module): """Attention layer - Custom layer to perform weighted average over the second axis (axis=1) Transforming a tensor of size [N, W, H] to [N, 1, H]. N: batch size W: number of words, different sentence length will need to be padded to have the same size for each mini-batch H: hidden state dimension or word embedding dimension Args: dim: The dimension of the word embedding Attributes: w: learnable weight matrix of size [dim, dim] v: learnable weight vector of size [dim] Examples:: >>> m = models_pytorch.Attention(300) >>> input = Variable(torch.randn(4, 128, 300)) >>> output = m(input) >>> print(output.size()) """ def __init__(self, dim): super(Attention, self).__init__() self.dim = dim self.att_weights = None self.w = nn.Parameter(torch.Tensor(dim, dim)) self.v = nn.Parameter(torch.Tensor(dim)) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.w.size(1)) self.w.data.uniform_(-stdv, stdv) self.v.data.uniform_(-stdv, stdv) def forward(self, input): wplus = torch.mm(input.contiguous().view(-1, input.size()[2]), self.w) wplus = wplus.contiguous().view(-1, input.size()[1], self.w.size()[1]) wplus = torch.tanh(wplus) att_w = torch.mm(wplus.contiguous().view(-1, wplus.size()[2]), self .v.contiguous().view(self.v.size()[0], 1)) att_w = att_w.contiguous().view(-1, wplus.size()[1]) att_w = F.softmax(att_w, dim=1) self.att_weights = att_w after_attention = torch.bmm(att_w.unsqueeze(1), input) return after_attention def __repr__(self): return self.__class__.__name__ + ' (' + '1' + ', ' + str(self.dim ) + ')' def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tl.store(in_out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_2, out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(64)](buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 1), (1, 1), 0), out=buf2) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 1, 4), (4, 4, 1), 0) del buf3 
extern_kernels.bmm(reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0 ), primals_1, out=buf5) return buf5, buf4, primals_1, buf1, buf4, reinterpret_tensor(primals_3, (1, 4), (1, 1), 0) class AttentionNew(nn.Module): """Attention layer - Custom layer to perform weighted average over the second axis (axis=1) Transforming a tensor of size [N, W, H] to [N, 1, H]. N: batch size W: number of words, different sentence length will need to be padded to have the same size for each mini-batch H: hidden state dimension or word embedding dimension Args: dim: The dimension of the word embedding Attributes: w: learnable weight matrix of size [dim, dim] v: learnable weight vector of size [dim] Examples:: >>> m = models_pytorch.Attention(300) >>> input = Variable(torch.randn(4, 128, 300)) >>> output = m(input) >>> print(output.size()) """ def __init__(self, dim): super(AttentionNew, self).__init__() self.dim = dim self.att_weights = None self.w = nn.Parameter(torch.Tensor(dim, dim)) self.v = nn.Parameter(torch.Tensor(dim)) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.w.size(1)) self.w.data.uniform_(-stdv, stdv) self.v.data.uniform_(-stdv, stdv) def __repr__(self): return self.__class__.__name__ + ' (' + '1' + ', ' + str(self.dim ) + ')' def forward(self, input_0): primals_2 = self.w primals_3 = self.v primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ovechkinVT/SkipRNN
Attention
false
7,436
[ "MIT" ]
1
7c1f37349d464b1b6bf8835520abad22b199f1ad
https://github.com/ovechkinVT/SkipRNN/tree/7c1f37349d464b1b6bf8835520abad22b199f1ad
AvgLayer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/3z/c3zp6tzyjvuxkyhpqg37skoz3sh23okiwohqkcwueawnz24zmgzs.py # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] # Source node to ATen node mapping: # mean => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [3], True), kwargs = {}) triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + 
(3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean] stream0 = get_raw_stream(0) triton_poi_fused_mean_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import torch.nn as nn import torch.utils.data class AvgLayer(nn.Module): def forward(self, input): return input.mean(3, keepdim=True) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mean_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 return buf0, class AvgLayerNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
pYtoner/EasyOCR
AvgLayer
false
7,437
[ "Apache-2.0" ]
1
cbb2df77ae789dd4c7807541e0357d9a698ba801
https://github.com/pYtoner/EasyOCR/tree/cbb2df77ae789dd4c7807541e0357d9a698ba801
LearnedUtility
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/yc/cycelhrr7d5b627wwjisqr4ziuy5dylqqlk7lgr32kypwamk5f6s.py # Topologically Sorted Source Nodes: [multiply], Original ATen: [aten.mul] # Source node to ATen node mapping: # multiply => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp3 = tmp1 * tmp2 tl.store(out_ptr0 + (x0), tmp3, xmask) ''', device_str='cuda') 
async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, ), (1, )) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [multiply], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0) del primals_1 return (buf0, primals_2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn


class LearnedUtility(nn.Module):

    def __init__(self, slope=0):
        super().__init__()
        self.theta_tt = torch.nn.Parameter(slope * torch.ones(1))
        # nn.Parameter already defaults to requires_grad=True; set explicitly here.
        self.theta_tt.requires_grad = True

    def forward(self, x):
        return torch.multiply(self.theta_tt, x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
    tmp2 = tl.load(in_ptr1 + x0, xmask)
    tmp3 = tmp1 * tmp2
    tl.store(out_ptr0 + x0, tmp3, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (1,), (1,))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(256)](primals_1, primals_2, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
    return buf0, primals_2


class LearnedUtilityNew(nn.Module):

    def __init__(self, slope=0):
        super().__init__()
        self.theta_tt = torch.nn.Parameter(slope * torch.ones(1))
        # nn.Parameter already defaults to requires_grad=True; set explicitly here.
        self.theta_tt.requires_grad = True

    def forward(self, input_0):
        primals_1 = self.theta_tt
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
pabloguarda/NeuralTransportationNetworks
LearnedUtility
false
7,438
[ "MIT" ]
1
0461c26128b09488aff237b760068b43d131f8a9
https://github.com/pabloguarda/NeuralTransportationNetworks/tree/0461c26128b09488aff237b760068b43d131f8a9
PreNet
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_4/inductor_cache/6o/c6o7ainbzocsswla76yvmdsc5donraaar3dzlx2icwrueb7fc46u.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   x_1 => relu
# Graph fragment:
#   %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
#   %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16384],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + (x2), tmp4, None)
    tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')


# kernel path: runs/run_shard_4/inductor_cache/dh/cdhj4aozvvzkw7stzrqoauyoij3petwtvi4g4weydesiaurrughd.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
#   x_4 => relu_1
# Graph fragment:
#   %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
#   %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[8192],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 8192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 128
    tmp0 = tl.load(in_out_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + (x2), tmp4, None)
    tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (256, 4), (4, 1))
    assert_size_stride(primals_2, (256, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (128, 256), (256, 1))
    assert_size_stride(primals_5, (128, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf0  # reuse
        buf11 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool)
        # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
        stream0 = get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf11, 16384, grid=grid(16384), stream=stream0)
        del primals_2
        # Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.relu, aten.native_dropout]
        buf2 = torch.ops.aten.native_dropout.default(buf1, 0.5, True)
        del buf1
        buf3 = buf2[0]
        buf4 = buf2[1]
        del buf2
        buf5 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(buf3, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 128), (1, 256), 0), out=buf5)
        buf6 = reinterpret_tensor(buf5, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf5  # reuse
        buf10 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
        # Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu, aten.threshold_backward]
        triton_poi_fused_relu_threshold_backward_1.run(buf6, primals_5, buf10, 8192, grid=grid(8192), stream=stream0)
        del primals_5
        # Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.relu, aten.native_dropout]
        buf7 = torch.ops.aten.native_dropout.default(buf6, 0.5, True)
        del buf6
        buf8 = buf7[0]
        buf9 = buf7[1]
        del buf7
    return (buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf4, reinterpret_tensor(buf3, (64, 256), (256, 1), 0), buf9, buf10, primals_4, buf11, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((128, 256), (256, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
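# Illustrative reference (not part of the generated file): both fused kernels
# above implement the same pointwise pattern -- add the linear-layer bias,
# apply ReLU, and record the `output <= 0` mask that aten.threshold_backward
# consumes in the backward pass. A minimal eager-mode sketch of that
# computation:

import torch


def fused_relu_threshold_reference(x: torch.Tensor, bias: torch.Tensor):
    out = torch.relu(x + bias)  # bias broadcasts over the trailing dim
    mask = out <= 0             # boolean mask saved for threshold_backward
    return out, mask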
import torch
from torch import nn
import torch.nn.functional as F


class PreNet(nn.Module):

    def __init__(self, in_dims, fc1_dims=256, fc2_dims=128, dropout=0.5):
        super().__init__()
        self.fc1 = nn.Linear(in_dims, fc1_dims)
        self.fc2 = nn.Linear(fc1_dims, fc2_dims)
        self.p = dropout

    def forward(self, x):
        x = self.fc1(x)
        x = F.relu(x)
        x = F.dropout(x, self.p, training=True)
        x = self.fc2(x)
        x = F.relu(x)
        x = F.dropout(x, self.p, training=True)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_dims': 4}]
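# Illustrative smoke test (a sketch, not part of the original source). PreNet
# applies dropout with training=True unconditionally -- the usual
# Tacotron-style PreNet convention -- so two forward passes over the same
# input will almost always differ.
if __name__ == '__main__':
    model = PreNet(in_dims=4)
    x = get_inputs()[0]
    y1, y2 = model(x), model(x)
    print(y1.shape)             # torch.Size([4, 4, 4, 128])
    print(torch.equal(y1, y2))  # almost always False: dropout stays active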
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused bias-add + ReLU for fc1 (in place); also stores the `<= 0` mask
    # consumed by threshold_backward in the backward pass.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 256  # bias index, broadcast over the last dim (fc1_dims=256)
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)  # ReLU
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, None)
    tl.store(out_ptr0 + x2, tmp6, None)


@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Same fusion as above, for fc2.
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 128  # bias index, broadcast over the last dim (fc2_dims=128)
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, None)
    tl.store(out_ptr0 + x2, tmp6, None)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (256, 4), (4, 1))
    assert_size_stride(primals_2, (256,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (128, 256), (256, 1))
    assert_size_stride(primals_5, (128,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # fc1 matmul: flatten the (4, 4, 4, 4) input to (64, 4) and multiply
        # by the transposed fc1 weight.
        buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0)
        del buf0
        buf11 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
            torch.bool)
        get_raw_stream(0)
        # Fused bias + ReLU in place, plus the backward mask for fc1.
        triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
            primals_2, buf11, 16384, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        # Dropout (always active, p=0.5); returns (output, mask).
        buf2 = torch.ops.aten.native_dropout.default(buf1, 0.5, True)
        del buf1
        buf3 = buf2[0]
        buf4 = buf2[1]
        del buf2
        # fc2 matmul on the flattened dropout output.
        buf5 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf3, (64, 256), (256, 1), 0),
            reinterpret_tensor(primals_4, (256, 128), (1, 256), 0), out=buf5)
        buf6 = reinterpret_tensor(buf5, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
        del buf5
        buf10 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
            torch.bool)
        triton_poi_fused_relu_threshold_backward_1[grid(8192)](buf6,
            primals_5, buf10, 8192, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf7 = torch.ops.aten.native_dropout.default(buf6, 0.5, True)
        del buf6
        buf8 = buf7[0]
        buf9 = buf7[1]
        del buf7
    # buf8 is the forward output; the remaining tensors are saved for backward.
    return (buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf4,
        reinterpret_tensor(buf3, (64, 256), (256, 1), 0), buf9, buf10,
        primals_4, buf11)


class PreNetNew(nn.Module):

    def __init__(self, in_dims, fc1_dims=256, fc2_dims=128, dropout=0.5):
        super().__init__()
        self.fc1 = nn.Linear(in_dims, fc1_dims)
        self.fc2 = nn.Linear(fc1_dims, fc2_dims)
        self.p = dropout  # kept for parity with PreNet; the graph hardcodes 0.5

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
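# Hedged sanity check (illustrative only): `call` pins all tensors to cuda:0,
# so this requires a CUDA device. Because both modules keep dropout active,
# outputs can only be compared statistically, not bitwise.
if __name__ == '__main__' and torch.cuda.is_available():
    model = PreNetNew(in_dims=4).cuda()
    x = torch.rand([4, 4, 4, 4], device='cuda')
    y = model(x)
    print(y.shape)  # torch.Size([4, 4, 4, 128])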
repo_name: padmalcom/AISpeechAssistant
module_name: PreNet
synthetic: false
uuid: 7439
licenses: ["Apache-2.0"]
stars: 1
sha: b7501a23a8f513acb5043f3c7bb06df129bdc2cc
repo_link: https://github.com/padmalcom/AISpeechAssistant/tree/b7501a23a8f513acb5043f3c7bb06df129bdc2cc